You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by pr...@apache.org on 2016/10/19 01:39:38 UTC

[01/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Repository: hive
Updated Branches:
  refs/heads/master 399af4663 -> f562dfb52


http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/varchar_udf1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/varchar_udf1.q.out b/ql/src/test/results/clientpositive/llap/varchar_udf1.q.out
new file mode 100644
index 0000000..e5cfce5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/varchar_udf1.q.out
@@ -0,0 +1,453 @@
+PREHOOK: query: drop table varchar_udf_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table varchar_udf_1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@varchar_udf_1
+PREHOOK: query: insert overwrite table varchar_udf_1
+  select key, value, key, value from src where key = '238' limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: insert overwrite table varchar_udf_1
+  select key, value, key, value from src where key = '238' limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@varchar_udf_1
+POSTHOOK: Lineage: varchar_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- UDFs with varchar support
+select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: -- UDFs with varchar support
+select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238val_238	238val_238	true
+PREHOOK: query: select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+VAL_238	VAL_238	true
+PREHOOK: query: select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: -- Scalar UDFs
+select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Scalar UDFs
+select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+118	118	true
+PREHOOK: query: select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238|val_238	238|val_238	true
+PREHOOK: query: select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+4	4	true
+PREHOOK: query: select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+7	7	true
+PREHOOK: query: select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+5	5	true
+PREHOOK: query: select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+        val_238	        val_238	true
+PREHOOK: query: select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+true	true	true
+PREHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238	238	true
+PREHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+replaced_238	replaced_238	true
+PREHOOK: query: select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+832_lav	832_lav	true
+PREHOOK: query: select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238        	val_238        	true
+PREHOOK: query: select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as varchar(50)))
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as varchar(50)))
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+[["See","spot","run"],["See","jane","run"]]	[["See","spot","run"],["See","jane","run"]]
+PREHOOK: query: select
+  split(c2, '_'),
+  split(c4, '_')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  split(c2, '_'),
+  split(c4, '_')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+["val","238"]	["val","238"]
+PREHOOK: query: select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+{"a":"1","b":"2","c":"3"}	{"a":"1","b":"2","c":"3"}
+PREHOOK: query: select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val	val	true
+PREHOOK: query: select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: -- Aggregate Functions
+select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from varchar_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Aggregate Functions
+select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}	{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}
+PREHOOK: query: select
+  min(c2),
+  min(c4)
+from varchar_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  min(c2),
+  min(c4)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238
+PREHOOK: query: select
+  max(c2),
+  max(c4)
+from varchar_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  max(c2),
+  max(c4)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238
+PREHOOK: query: drop table varchar_udf_1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@varchar_udf_1
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: drop table varchar_udf_1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@varchar_udf_1
+POSTHOOK: Output: default@varchar_udf_1

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
new file mode 100644
index 0000000..1f23d44
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
@@ -0,0 +1,1000 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+drop table varchar_udf_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+drop table varchar_udf_1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@varchar_udf_1
+PREHOOK: query: insert overwrite table varchar_udf_1
+  select key, value, key, value from src where key = '238' limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: insert overwrite table varchar_udf_1
+  select key, value, key, value from src where key = '238' limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@varchar_udf_1
+POSTHOOK: Lineage: varchar_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF_txt
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF_txt
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE DECIMAL_UDF_txt (key decimal(20,10), value int)
+ROW FORMAT DELIMITED
+   FIELDS TERMINATED BY ' '
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@DECIMAL_UDF_txt
+POSTHOOK: query: CREATE TABLE DECIMAL_UDF_txt (key decimal(20,10), value int)
+ROW FORMAT DELIMITED
+   FIELDS TERMINATED BY ' '
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@DECIMAL_UDF_txt
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF_txt
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@decimal_udf_txt
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF_txt
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@decimal_udf_txt
+PREHOOK: query: CREATE TABLE DECIMAL_UDF (key decimal(20,10), value int)
+STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@DECIMAL_UDF
+POSTHOOK: query: CREATE TABLE DECIMAL_UDF (key decimal(20,10), value int)
+STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@DECIMAL_UDF
+PREHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF SELECT * FROM DECIMAL_UDF_txt
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_udf_txt
+PREHOOK: Output: default@decimal_udf
+POSTHOOK: query: INSERT OVERWRITE TABLE DECIMAL_UDF SELECT * FROM DECIMAL_UDF_txt
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_udf_txt
+POSTHOOK: Output: default@decimal_udf
+POSTHOOK: Lineage: decimal_udf.key SIMPLE [(decimal_udf_txt)decimal_udf_txt.FieldSchema(name:key, type:decimal(20,10), comment:null), ]
+POSTHOOK: Lineage: decimal_udf.value SIMPLE [(decimal_udf_txt)decimal_udf_txt.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: drop table if exists count_case_groupby
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists count_case_groupby
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table count_case_groupby (key string, bool boolean) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@count_case_groupby
+POSTHOOK: query: create table count_case_groupby (key string, bool boolean) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@count_case_groupby
+PREHOOK: query: insert into table count_case_groupby values ('key1', true),('key2', false),('key3', NULL),('key4', false),('key5',NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@count_case_groupby
+POSTHOOK: query: insert into table count_case_groupby values ('key1', true),('key2', false),('key3', NULL),('key4', false),('key5',NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@count_case_groupby
+POSTHOOK: Lineage: count_case_groupby.bool EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: count_case_groupby.key SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: explain
+select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: varchar_udf_1
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: c2 regexp 'val' (type: boolean), c4 regexp 'val' (type: boolean), (c2 regexp 'val' = c4 regexp 'val') (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+true	true	true
+PREHOOK: query: explain
+select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: varchar_udf_1
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: regexp_extract(c2, 'val_([0-9]+)', 1) (type: string), regexp_extract(c4, 'val_([0-9]+)', 1) (type: string), (regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)) (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238	238	true
+PREHOOK: query: explain
+select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: varchar_udf_1
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: regexp_replace(c2, 'val', 'replaced') (type: string), regexp_replace(c4, 'val', 'replaced') (type: string), (regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')) (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+replaced_238	replaced_238	true
+PREHOOK: query: explain
+select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: varchar_udf_1
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: c2 regexp 'val' (type: boolean), c4 regexp 'val' (type: boolean), (c2 regexp 'val' = c4 regexp 'val') (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+true	true	true
+PREHOOK: query: explain
+select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: varchar_udf_1
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: regexp_extract(c2, 'val_([0-9]+)', 1) (type: string), regexp_extract(c4, 'val_([0-9]+)', 1) (type: string), (regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)) (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238	238	true
+PREHOOK: query: explain
+select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: varchar_udf_1
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: regexp_replace(c2, 'val', 'replaced') (type: string), regexp_replace(c4, 'val', 'replaced') (type: string), (regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')) (type: boolean)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 1
+                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+replaced_238	replaced_238	true
+PREHOOK: query: EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_udf
+                  Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: power(key, 2) (type: double)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT POWER(key, 2) FROM DECIMAL_UDF
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_udf
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT POWER(key, 2) FROM DECIMAL_UDF
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_udf
+#### A masked pattern was here ####
+0.0
+0.0
+0.0
+0.010000000000000002
+0.04000000000000001
+0.09
+0.09
+0.10890000000000001
+0.10890000000000001
+0.11088900000000002
+0.11088900000000002
+1.0
+1.0
+1.0
+1.0E-4
+1.2544000000000002
+1.2544000000000002
+1.2544000000000002
+1.2588840000000003
+1.2588840000000003
+1.52415787532388352E18
+1.52415787532388352E18
+1.936E7
+100.0
+10000.0
+15376.0
+15675.04
+1576255.1401
+4.0
+4.0
+4.0E-4
+400.0
+40000.0
+9.8596
+9.8596
+9.8596
+9.8596
+NULL
+PREHOOK: query: EXPLAIN
+SELECT
+  exp(key), ln(key),
+  log(key), log(key, key), log(key, value), log(value, key),
+  log10(key), sqrt(key)
+FROM DECIMAL_UDF WHERE key = 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT
+  exp(key), ln(key),
+  log(key), log(key, key), log(key, value), log(value, key),
+  log10(key), sqrt(key)
+FROM DECIMAL_UDF WHERE key = 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_udf
+                  Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key = 10) (type: boolean)
+                    Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+                      Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT
+  exp(key), ln(key),
+  log(key), log(key, key), log(key, value), log(value, key),
+  log10(key), sqrt(key)
+FROM DECIMAL_UDF WHERE key = 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_udf
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT
+  exp(key), ln(key),
+  log(key), log(key, key), log(key, value), log(value, key),
+  log10(key), sqrt(key)
+FROM DECIMAL_UDF WHERE key = 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_udf
+#### A masked pattern was here ####
+22026.465794806718	2.302585092994046	2.302585092994046	1.0	1.0	1.0	1.0	3.1622776601683795
+PREHOOK: query: EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_udf
+                  Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: power(key, 2) (type: double)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT POWER(key, 2) FROM DECIMAL_UDF
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_udf
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT POWER(key, 2) FROM DECIMAL_UDF
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_udf
+#### A masked pattern was here ####
+0.0
+0.0
+0.0
+0.010000000000000002
+0.04000000000000001
+0.09
+0.09
+0.10890000000000001
+0.10890000000000001
+0.11088900000000002
+0.11088900000000002
+1.0
+1.0
+1.0
+1.0E-4
+1.2544000000000002
+1.2544000000000002
+1.2544000000000002
+1.2588840000000003
+1.2588840000000003
+1.52415787532388352E18
+1.52415787532388352E18
+1.936E7
+100.0
+10000.0
+15376.0
+15675.04
+1576255.1401
+4.0
+4.0
+4.0E-4
+400.0
+40000.0
+9.8596
+9.8596
+9.8596
+9.8596
+NULL
+PREHOOK: query: EXPLAIN
+SELECT
+  exp(key), ln(key),
+  log(key), log(key, key), log(key, value), log(value, key),
+  log10(key), sqrt(key)
+FROM DECIMAL_UDF WHERE key = 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT
+  exp(key), ln(key),
+  log(key), log(key, key), log(key, value), log(value, key),
+  log10(key), sqrt(key)
+FROM DECIMAL_UDF WHERE key = 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_udf
+                  Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key = 10) (type: boolean)
+                    Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+                      Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT
+  exp(key), ln(key),
+  log(key), log(key, key), log(key, value), log(value, key),
+  log10(key), sqrt(key)
+FROM DECIMAL_UDF WHERE key = 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_udf
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT
+  exp(key), ln(key),
+  log(key), log(key, key), log(key, value), log(value, key),
+  log10(key), sqrt(key)
+FROM DECIMAL_UDF WHERE key = 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_udf
+#### A masked pattern was here ####
+22026.465794806718	2.302585092994046	2.302585092994046	1.0	1.0	1.0	1.0	3.1622776601683795
+PREHOOK: query: explain
+SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: count_case_groupby
+                  Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string), CASE WHEN (bool) THEN (1) WHEN ((not bool)) THEN (0) ELSE (null) END (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(_col1)
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@count_case_groupby
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@count_case_groupby
+#### A masked pattern was here ####
+key1	1
+key2	1
+key3	0
+key4	1
+key5	0
+PREHOOK: query: explain
+SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: count_case_groupby
+                  Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string), CASE WHEN (bool) THEN (1) WHEN ((not bool)) THEN (0) ELSE (null) END (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(_col1)
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@count_case_groupby
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, COUNT(CASE WHEN bool THEN 1 WHEN NOT bool THEN 0 ELSE NULL END) AS cnt_bool0_ok FROM count_case_groupby GROUP BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@count_case_groupby
+#### A masked pattern was here ####
+key1	1
+key2	1
+key3	0
+key4	1
+key5	0
+PREHOOK: query: drop table varchar_udf_1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@varchar_udf_1
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: drop table varchar_udf_1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@varchar_udf_1
+POSTHOOK: Output: default@varchar_udf_1
+PREHOOK: query: DROP TABLE DECIMAL_UDF_txt
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@decimal_udf_txt
+PREHOOK: Output: default@decimal_udf_txt
+POSTHOOK: query: DROP TABLE DECIMAL_UDF_txt
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@decimal_udf_txt
+POSTHOOK: Output: default@decimal_udf_txt
+PREHOOK: query: DROP TABLE DECIMAL_UDF
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@decimal_udf
+PREHOOK: Output: default@decimal_udf
+POSTHOOK: query: DROP TABLE DECIMAL_UDF
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@decimal_udf
+POSTHOOK: Output: default@decimal_udf
+PREHOOK: query: drop table count_case_groupby
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@count_case_groupby
+PREHOOK: Output: default@count_case_groupby
+POSTHOOK: query: drop table count_case_groupby
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@count_case_groupby
+POSTHOOK: Output: default@count_case_groupby


[20/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/lateral_view.q.out b/ql/src/test/results/clientpositive/llap/lateral_view.q.out
new file mode 100644
index 0000000..bb3cfcf
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/lateral_view.q.out
@@ -0,0 +1,739 @@
+PREHOOK: query: CREATE TABLE tmp_pyang_lv (inputs string) STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tmp_pyang_lv
+POSTHOOK: query: CREATE TABLE tmp_pyang_lv (inputs string) STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmp_pyang_lv
+PREHOOK: query: INSERT OVERWRITE TABLE tmp_pyang_lv SELECT key FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tmp_pyang_lv
+POSTHOOK: query: INSERT OVERWRITE TABLE tmp_pyang_lv SELECT key FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tmp_pyang_lv
+POSTHOOK: Lineage: tmp_pyang_lv.inputs SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY key ASC, myCol ASC LIMIT 1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY key ASC, myCol ASC LIMIT 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Lateral View Forward
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: key, value
+                      Statistics: Num rows: 500 Data size: 261000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Lateral View Join Operator
+                        outputColumnNames: _col0, _col1, _col5
+                        Statistics: Num rows: 1000 Data size: 289000 Basic stats: COMPLETE Column stats: COMPLETE
+                        Select Operator
+                          expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int)
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1000 Data size: 289000 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: string), _col2 (type: int)
+                            sort order: ++
+                            Statistics: Num rows: 1000 Data size: 289000 Basic stats: COMPLETE Column stats: COMPLETE
+                            TopN Hash Memory Usage: 0.1
+                            value expressions: _col1 (type: string)
+                    Select Operator
+                      expressions: array(1,2,3) (type: array<int>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE
+                      UDTF Operator
+                        Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col0, _col1, _col5
+                          Statistics: Num rows: 1000 Data size: 289000 Basic stats: COMPLETE Column stats: COMPLETE
+                          Select Operator
+                            expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int)
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 1000 Data size: 289000 Basic stats: COMPLETE Column stats: COMPLETE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string), _col2 (type: int)
+                              sort order: ++
+                              Statistics: Num rows: 1000 Data size: 289000 Basic stats: COMPLETE Column stats: COMPLETE
+                              TopN Hash Memory Usage: 0.1
+                              value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), KEY.reducesinkkey1 (type: int)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 1
+                  Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string), _col2 (type: int)
+                    sort order: ++
+                    Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), KEY.reducesinkkey1 (type: int)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 1
+                  Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT myTable.* FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT myTable.* FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        TableScan
+          alias: src
+          Lateral View Forward
+            Select Operator
+              Lateral View Join Operator
+                outputColumnNames: _col5
+                Select Operator
+                  expressions: _col5 (type: int)
+                  outputColumnNames: _col0
+                  Limit
+                    Number of rows: 3
+                    ListSink
+            Select Operator
+              expressions: array(1,2,3) (type: array<int>)
+              outputColumnNames: _col0
+              UDTF Operator
+                function name: explode
+                Lateral View Join Operator
+                  outputColumnNames: _col5
+                  Select Operator
+                    expressions: _col5 (type: int)
+                    outputColumnNames: _col0
+                    Limit
+                      Number of rows: 3
+                      ListSink
+
+PREHOOK: query: EXPLAIN SELECT myTable.myCol, myTable2.myCol2 FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT myTable.myCol, myTable2.myCol2 FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 9
+      Processor Tree:
+        TableScan
+          alias: src
+          Lateral View Forward
+            Select Operator
+              Lateral View Join Operator
+                outputColumnNames: _col5
+                Lateral View Forward
+                  Select Operator
+                    expressions: _col5 (type: int)
+                    outputColumnNames: _col5
+                    Lateral View Join Operator
+                      outputColumnNames: _col5, _col6
+                      Select Operator
+                        expressions: _col5 (type: int), _col6 (type: string)
+                        outputColumnNames: _col0, _col1
+                        Limit
+                          Number of rows: 9
+                          ListSink
+                  Select Operator
+                    expressions: array('a','b','c') (type: array<string>)
+                    outputColumnNames: _col0
+                    UDTF Operator
+                      function name: explode
+                      Lateral View Join Operator
+                        outputColumnNames: _col5, _col6
+                        Select Operator
+                          expressions: _col5 (type: int), _col6 (type: string)
+                          outputColumnNames: _col0, _col1
+                          Limit
+                            Number of rows: 9
+                            ListSink
+            Select Operator
+              expressions: array(1,2,3) (type: array<int>)
+              outputColumnNames: _col0
+              UDTF Operator
+                function name: explode
+                Lateral View Join Operator
+                  outputColumnNames: _col5
+                  Lateral View Forward
+                    Select Operator
+                      expressions: _col5 (type: int)
+                      outputColumnNames: _col5
+                      Lateral View Join Operator
+                        outputColumnNames: _col5, _col6
+                        Select Operator
+                          expressions: _col5 (type: int), _col6 (type: string)
+                          outputColumnNames: _col0, _col1
+                          Limit
+                            Number of rows: 9
+                            ListSink
+                    Select Operator
+                      expressions: array('a','b','c') (type: array<string>)
+                      outputColumnNames: _col0
+                      UDTF Operator
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col5, _col6
+                          Select Operator
+                            expressions: _col5 (type: int), _col6 (type: string)
+                            outputColumnNames: _col0, _col1
+                            Limit
+                              Number of rows: 9
+                              ListSink
+
+PREHOOK: query: EXPLAIN SELECT myTable2.* FROM src LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2 LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT myTable2.* FROM src LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2 LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        TableScan
+          alias: src
+          Lateral View Forward
+            Select Operator
+              Lateral View Join Operator
+                outputColumnNames: _col5
+                Lateral View Forward
+                  Select Operator
+                    Lateral View Join Operator
+                      outputColumnNames: _col6
+                      Select Operator
+                        expressions: _col6 (type: int)
+                        outputColumnNames: _col0
+                        Limit
+                          Number of rows: 3
+                          ListSink
+                  Select Operator
+                    expressions: _col5 (type: array<int>)
+                    outputColumnNames: _col0
+                    UDTF Operator
+                      function name: explode
+                      Lateral View Join Operator
+                        outputColumnNames: _col6
+                        Select Operator
+                          expressions: _col6 (type: int)
+                          outputColumnNames: _col0
+                          Limit
+                            Number of rows: 3
+                            ListSink
+            Select Operator
+              expressions: array(array(1,2,3)) (type: array<array<int>>)
+              outputColumnNames: _col0
+              UDTF Operator
+                function name: explode
+                Lateral View Join Operator
+                  outputColumnNames: _col5
+                  Lateral View Forward
+                    Select Operator
+                      Lateral View Join Operator
+                        outputColumnNames: _col6
+                        Select Operator
+                          expressions: _col6 (type: int)
+                          outputColumnNames: _col0
+                          Limit
+                            Number of rows: 3
+                            ListSink
+                    Select Operator
+                      expressions: _col5 (type: array<int>)
+                      outputColumnNames: _col0
+                      UDTF Operator
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col6
+                          Select Operator
+                            expressions: _col6 (type: int)
+                            outputColumnNames: _col0
+                            Limit
+                              Number of rows: 3
+                              ListSink
+
+PREHOOK: query: -- Verify that * selects columns from both tables
+SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY key ASC, myCol ASC LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- Verify that * selects columns from both tables
+SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY key ASC, myCol ASC LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0	1
+PREHOOK: query: -- TABLE.* should be supported
+SELECT myTable.* FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- TABLE.* should be supported
+SELECT myTable.* FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1
+2
+3
+PREHOOK: query: -- Multiple lateral views should result in a Cartesian product
+SELECT myTable.myCol, myTable2.myCol2 FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- Multiple lateral views should result in a Cartesian product
+SELECT myTable.myCol, myTable2.myCol2 FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1	a
+1	b
+1	c
+2	a
+2	b
+2	c
+3	a
+3	b
+3	c
+PREHOOK: query: -- Should be able to reference tables generated earlier
+SELECT myTable2.* FROM src LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2 LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- Should be able to reference tables generated earlier
+SELECT myTable2.* FROM src LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2 LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1
+2
+3
+PREHOOK: query: EXPLAIN
+SELECT myCol from tmp_pyang_lv LATERAL VIEW explode(array(1,2,3)) myTab as myCol limit 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT myCol from tmp_pyang_lv LATERAL VIEW explode(array(1,2,3)) myTab as myCol limit 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        TableScan
+          alias: tmp_pyang_lv
+          Lateral View Forward
+            Select Operator
+              Lateral View Join Operator
+                outputColumnNames: _col4
+                Select Operator
+                  expressions: _col4 (type: int)
+                  outputColumnNames: _col0
+                  Limit
+                    Number of rows: 3
+                    ListSink
+            Select Operator
+              expressions: array(1,2,3) (type: array<int>)
+              outputColumnNames: _col0
+              UDTF Operator
+                function name: explode
+                Lateral View Join Operator
+                  outputColumnNames: _col4
+                  Select Operator
+                    expressions: _col4 (type: int)
+                    outputColumnNames: _col0
+                    Limit
+                      Number of rows: 3
+                      ListSink
+
+PREHOOK: query: SELECT myCol from tmp_PYANG_lv LATERAL VIEW explode(array(1,2,3)) myTab as myCol limit 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp_pyang_lv
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT myCol from tmp_PYANG_lv LATERAL VIEW explode(array(1,2,3)) myTab as myCol limit 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp_pyang_lv
+#### A masked pattern was here ####
+1
+2
+3
+PREHOOK: query: CREATE TABLE tmp_pyang_src_rcfile (key string, value array<string>) STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tmp_pyang_src_rcfile
+POSTHOOK: query: CREATE TABLE tmp_pyang_src_rcfile (key string, value array<string>) STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmp_pyang_src_rcfile
+PREHOOK: query: INSERT OVERWRITE TABLE tmp_pyang_src_rcfile SELECT key, array(value) FROM src ORDER BY key LIMIT 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tmp_pyang_src_rcfile
+POSTHOOK: query: INSERT OVERWRITE TABLE tmp_pyang_src_rcfile SELECT key, array(value) FROM src ORDER BY key LIMIT 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tmp_pyang_src_rcfile
+POSTHOOK: Lineage: tmp_pyang_src_rcfile.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp_pyang_src_rcfile.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT key,value from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key,value from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+0	["val_0"]
+0	["val_0"]
+0	["val_0"]
+10	["val_10"]
+100	["val_100"]
+100	["val_100"]
+103	["val_103"]
+103	["val_103"]
+104	["val_104"]
+104	["val_104"]
+105	["val_105"]
+11	["val_11"]
+111	["val_111"]
+113	["val_113"]
+113	["val_113"]
+114	["val_114"]
+116	["val_116"]
+118	["val_118"]
+118	["val_118"]
+119	["val_119"]
+PREHOOK: query: SELECT myCol from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT myCol from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+val_0
+val_0
+val_0
+val_10
+val_100
+val_100
+val_103
+val_103
+val_104
+val_104
+val_105
+val_11
+val_111
+val_113
+val_113
+val_114
+val_116
+val_118
+val_118
+val_119
+PREHOOK: query: SELECT * from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+0	["val_0"]	val_0
+0	["val_0"]	val_0
+0	["val_0"]	val_0
+10	["val_10"]	val_10
+100	["val_100"]	val_100
+100	["val_100"]	val_100
+103	["val_103"]	val_103
+103	["val_103"]	val_103
+104	["val_104"]	val_104
+104	["val_104"]	val_104
+105	["val_105"]	val_105
+11	["val_11"]	val_11
+111	["val_111"]	val_111
+113	["val_113"]	val_113
+113	["val_113"]	val_113
+114	["val_114"]	val_114
+116	["val_116"]	val_116
+118	["val_118"]	val_118
+118	["val_118"]	val_118
+119	["val_119"]	val_119
+PREHOOK: query: SELECT subq.key,subq.value 
+FROM (
+SELECT * from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+)subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT subq.key,subq.value 
+FROM (
+SELECT * from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+)subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+0	["val_0"]
+0	["val_0"]
+0	["val_0"]
+10	["val_10"]
+100	["val_100"]
+100	["val_100"]
+103	["val_103"]
+103	["val_103"]
+104	["val_104"]
+104	["val_104"]
+105	["val_105"]
+11	["val_11"]
+111	["val_111"]
+113	["val_113"]
+113	["val_113"]
+114	["val_114"]
+116	["val_116"]
+118	["val_118"]
+118	["val_118"]
+119	["val_119"]
+PREHOOK: query: SELECT subq.myCol
+FROM (
+SELECT * from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+)subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT subq.myCol
+FROM (
+SELECT * from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+)subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+val_0
+val_0
+val_0
+val_10
+val_100
+val_100
+val_103
+val_103
+val_104
+val_104
+val_105
+val_11
+val_111
+val_113
+val_113
+val_114
+val_116
+val_118
+val_118
+val_119
+PREHOOK: query: SELECT subq.key 
+FROM (
+SELECT key, value from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+)subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT subq.key 
+FROM (
+SELECT key, value from tmp_pyang_src_rcfile LATERAL VIEW explode(value) myTable AS myCol
+)subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+0
+0
+0
+10
+100
+100
+103
+103
+104
+104
+105
+11
+111
+113
+113
+114
+116
+118
+118
+119
+PREHOOK: query: EXPLAIN SELECT value, myCol from (SELECT key, array(value[0]) AS value FROM tmp_pyang_src_rcfile GROUP BY value[0], key) a
+LATERAL VIEW explode(value) myTable AS myCol
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT value, myCol from (SELECT key, array(value[0]) AS value FROM tmp_pyang_src_rcfile GROUP BY value[0], key) a
+LATERAL VIEW explode(value) myTable AS myCol
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tmp_pyang_src_rcfile
+                  Statistics: Num rows: 20 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: value (type: array<string>), key (type: string)
+                    outputColumnNames: value, key
+                    Statistics: Num rows: 20 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: value[0] (type: string), key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 20 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                        Statistics: Num rows: 20 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 92 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: array(_col0) (type: array<string>)
+                  outputColumnNames: _col1
+                  Statistics: Num rows: 10 Data size: 92 Basic stats: COMPLETE Column stats: NONE
+                  Lateral View Forward
+                    Statistics: Num rows: 10 Data size: 92 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col1 (type: array<string>)
+                      outputColumnNames: _col1
+                      Statistics: Num rows: 10 Data size: 92 Basic stats: COMPLETE Column stats: NONE
+                      Lateral View Join Operator
+                        outputColumnNames: _col1, _col2
+                        Statistics: Num rows: 20 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col1 (type: array<string>), _col2 (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 20 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 20 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                            table:
+                                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    Select Operator
+                      expressions: _col1 (type: array<string>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 92 Basic stats: COMPLETE Column stats: NONE
+                      UDTF Operator
+                        Statistics: Num rows: 10 Data size: 92 Basic stats: COMPLETE Column stats: NONE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col1, _col2
+                          Statistics: Num rows: 20 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                          Select Operator
+                            expressions: _col1 (type: array<string>), _col2 (type: string)
+                            outputColumnNames: _col0, _col1
+                            Statistics: Num rows: 20 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                            File Output Operator
+                              compressed: false
+                              Statistics: Num rows: 20 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                              table:
+                                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT value, myCol from (SELECT key, array(value[0]) AS value FROM tmp_pyang_src_rcfile GROUP BY value[0], key) a
+LATERAL VIEW explode(value) myTable AS myCol
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT value, myCol from (SELECT key, array(value[0]) AS value FROM tmp_pyang_src_rcfile GROUP BY value[0], key) a
+LATERAL VIEW explode(value) myTable AS myCol
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmp_pyang_src_rcfile
+#### A masked pattern was here ####
+["val_0"]	val_0
+["val_10"]	val_10
+["val_100"]	val_100
+["val_103"]	val_103
+["val_104"]	val_104
+["val_105"]	val_105
+["val_11"]	val_11
+["val_111"]	val_111
+["val_113"]	val_113
+["val_114"]	val_114
+["val_116"]	val_116
+["val_118"]	val_118
+["val_119"]	val_119

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out b/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out
new file mode 100644
index 0000000..9297b0e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/limit_pushdown3.q.out
@@ -0,0 +1,1446 @@
+PREHOOK: query: explain
+select key,value from src order by key limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key,value from src order by key limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 0.3
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key,value from src order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value from src order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+100	val_100
+100	val_100
+103	val_103
+103	val_103
+104	val_104
+104	val_104
+105	val_105
+11	val_11
+111	val_111
+113	val_113
+113	val_113
+114	val_114
+116	val_116
+118	val_118
+118	val_118
+119	val_119
+PREHOOK: query: explain
+select key,value from src order by key desc limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key,value from src order by key desc limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: -
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 0.3
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key,value from src order by key desc limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value from src order by key desc limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+98	val_98
+98	val_98
+97	val_97
+97	val_97
+96	val_96
+95	val_95
+95	val_95
+92	val_92
+90	val_90
+90	val_90
+90	val_90
+9	val_9
+87	val_87
+86	val_86
+85	val_85
+84	val_84
+84	val_84
+83	val_83
+83	val_83
+82	val_82
+PREHOOK: query: explain
+select value, sum(key + 1) as sum from src group by value order by value limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select value, sum(key + 1) as sum from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: sum(_col1)
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.3
+                        value expressions: _col1 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.3
+                  value expressions: _col1 (type: double)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: double)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select value, sum(key + 1) as sum from src group by value order by value limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value, sum(key + 1) as sum from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_0	3.0
+val_10	11.0
+val_100	202.0
+val_103	208.0
+val_104	210.0
+val_105	106.0
+val_11	12.0
+val_111	112.0
+val_113	228.0
+val_114	115.0
+val_116	117.0
+val_118	238.0
+val_119	360.0
+val_12	26.0
+val_120	242.0
+val_125	252.0
+val_126	127.0
+val_128	387.0
+val_129	260.0
+val_131	132.0
+PREHOOK: query: -- deduped RS
+explain
+select value,avg(key + 1) from src group by value order by value limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- deduped RS
+explain
+select value,avg(key + 1) from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: avg(_col1)
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 214 Data size: 36594 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 214 Data size: 36594 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.3
+                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:double>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: avg(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.3
+                  value expressions: _col1 (type: double)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: double)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select value,avg(key + 1) from src group by value order by value limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value,avg(key + 1) from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_0	1.0
+val_10	11.0
+val_100	101.0
+val_103	104.0
+val_104	105.0
+val_105	106.0
+val_11	12.0
+val_111	112.0
+val_113	114.0
+val_114	115.0
+val_116	117.0
+val_118	119.0
+val_119	120.0
+val_12	13.0
+val_120	121.0
+val_125	126.0
+val_126	127.0
+val_128	129.0
+val_129	130.0
+val_131	132.0
+PREHOOK: query: -- distincts
+explain
+select distinct(cdouble) as dis from alltypesorc order by dis limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- distincts
+explain
+select distinct(cdouble) as dis from alltypesorc order by dis limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cdouble (type: double)
+                    outputColumnNames: cdouble
+                    Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: cdouble (type: double)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 4265 Data size: 25480 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: double)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: double)
+                        Statistics: Num rows: 4265 Data size: 25480 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.3
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: double)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 4265 Data size: 25480 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: double)
+                  sort order: +
+                  Statistics: Num rows: 4265 Data size: 25480 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.3
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: double)
+                outputColumnNames: _col0
+                Statistics: Num rows: 4265 Data size: 25480 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Statistics: Num rows: 20 Data size: 128 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 128 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select distinct(cdouble) as dis from alltypesorc order by dis limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select distinct(cdouble) as dis from alltypesorc order by dis limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+NULL
+-16379.0
+-16373.0
+-16372.0
+-16369.0
+-16355.0
+-16339.0
+-16324.0
+-16311.0
+-16310.0
+-16309.0
+-16307.0
+-16306.0
+-16305.0
+-16300.0
+-16296.0
+-16280.0
+-16277.0
+-16274.0
+-16269.0
+PREHOOK: query: explain
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cdouble (type: double)
+                    outputColumnNames: ctinyint, cdouble
+                    Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: ctinyint (type: tinyint), cdouble (type: double)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: double)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: tinyint)
+                        Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count(_col1)
+                  keys: _col0 (type: tinyint)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 95 Data size: 1048 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: tinyint)
+                    sort order: +
+                    Statistics: Num rows: 95 Data size: 1048 Basic stats: COMPLETE Column stats: COMPLETE
+                    TopN Hash Memory Usage: 0.3
+                    value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: tinyint), VALUE._col0 (type: bigint)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 95 Data size: 1048 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+NULL	2932
+-64	24
+-63	19
+-62	27
+-61	25
+-60	27
+-59	31
+-58	23
+-57	35
+-56	36
+-55	29
+-54	26
+-53	22
+-52	33
+-51	21
+-50	30
+-49	26
+-48	29
+-47	22
+-46	24
+PREHOOK: query: explain 
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cdouble (type: double)
+                    outputColumnNames: ctinyint, cdouble
+                    Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: ctinyint (type: tinyint), cdouble (type: double)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: double)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: tinyint)
+                        Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count(_col1)
+                  keys: _col0 (type: tinyint)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 95 Data size: 1048 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: tinyint)
+                    sort order: +
+                    Statistics: Num rows: 95 Data size: 1048 Basic stats: COMPLETE Column stats: COMPLETE
+                    TopN Hash Memory Usage: 0.3
+                    value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: tinyint), VALUE._col0 (type: bigint)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 95 Data size: 1048 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+NULL	2932
+-64	24
+-63	19
+-62	27
+-61	25
+-60	27
+-59	31
+-58	23
+-57	35
+-56	36
+-55	29
+-54	26
+-53	22
+-52	33
+-51	21
+-50	30
+-49	26
+-48	29
+-47	22
+-46	24
+PREHOOK: query: -- multi distinct
+explain
+select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- multi distinct
+explain
+select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 1779850 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
+                    outputColumnNames: ctinyint, cstring1, cstring2
+                    Statistics: Num rows: 12288 Data size: 1779850 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count(DISTINCT cstring1), count(DISTINCT cstring2)
+                      keys: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Statistics: Num rows: 12288 Data size: 1976458 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: string), _col2 (type: string)
+                        sort order: +++
+                        Map-reduce partition columns: _col0 (type: tinyint)
+                        Statistics: Num rows: 12288 Data size: 1976458 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.3
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0)
+                keys: KEY._col0 (type: tinyint)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 95 Data size: 1808 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: tinyint)
+                  sort order: +
+                  Statistics: Num rows: 95 Data size: 1808 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.3
+                  value expressions: _col1 (type: bigint), _col2 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: tinyint), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 95 Data size: 1808 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Statistics: Num rows: 20 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+NULL	3065	3
+-64	3	13
+-63	3	16
+-62	3	23
+-61	3	25
+-60	3	25
+-59	3	27
+-58	3	24
+-57	3	23
+-56	3	22
+-55	3	21
+-54	3	21
+-53	3	17
+-52	3	21
+-51	1012	1045
+-50	3	25
+-49	3	24
+-48	3	27
+-47	3	23
+-46	3	19
+PREHOOK: query: -- limit zero
+explain
+select key,value from src order by key limit 0
+PREHOOK: type: QUERY
+POSTHOOK: query: -- limit zero
+explain
+select key,value from src order by key limit 0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 0
+      Processor Tree:
+        TableScan
+          alias: src
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Limit
+              Number of rows: 0
+              ListSink
+
+PREHOOK: query: select key,value from src order by key limit 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value from src order by key limit 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+PREHOOK: query: -- 2MR (applied to last RS)
+explain
+select value, sum(key) as sum from src group by value order by sum limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 2MR (applied to last RS)
+explain
+select value, sum(key) as sum from src group by value order by sum limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: value (type: string), key (type: string)
+                    outputColumnNames: value, key
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: sum(key)
+                      keys: value (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col1 (type: double)
+                  sort order: +
+                  Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.3
+                  value expressions: _col0 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: double)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select value, sum(key) as sum from src group by value order by sum limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value, sum(key) as sum from src group by value order by sum limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_0	0.0
+val_2	2.0
+val_4	4.0
+val_8	8.0
+val_9	9.0
+val_10	10.0
+val_11	11.0
+val_5	15.0
+val_17	17.0
+val_19	19.0
+val_20	20.0
+val_12	24.0
+val_27	27.0
+val_28	28.0
+val_30	30.0
+val_15	30.0
+val_33	33.0
+val_34	34.0
+val_18	36.0
+val_41	41.0
+PREHOOK: query: -- map aggregation disabled
+explain
+select value, sum(key) as sum from src group by value order by value limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map aggregation disabled
+explain
+select value, sum(key) as sum from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: value (type: string)
+                      sort order: +
+                      Map-reduce partition columns: value (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 0.3
+                      value expressions: key (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.3
+                  value expressions: _col1 (type: double)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: double)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select value, sum(key) as sum from src group by value order by value limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value, sum(key) as sum from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_0	0.0
+val_10	10.0
+val_100	200.0
+val_103	206.0
+val_104	208.0
+val_105	105.0
+val_11	11.0
+val_111	111.0
+val_113	226.0
+val_114	114.0
+val_116	116.0
+val_118	236.0
+val_119	357.0
+val_12	24.0
+val_120	240.0
+val_125	250.0
+val_126	126.0
+val_128	384.0
+val_129	258.0
+val_131	131.0
+PREHOOK: query: -- flush for order-by
+explain
+select key,value,value,value,value,value,value,value,value from src order by key limit 100
+PREHOOK: type: QUERY
+POSTHOOK: query: -- flush for order-by
+explain
+select key,value,value,value,value,value,value,value,value from src order by key limit 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 407500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Statistics: Num rows: 500 Data size: 407500 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 2.0E-5
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 500 Data size: 407500 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 100
+                  Statistics: Num rows: 100 Data size: 81500 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 100 Data size: 81500 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 100
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key,value,value,value,value,value,value,value,value from src order by key limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value,value,value,value,value,value,value,value from src order by key limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0	val_0	val_0	val_0	val_0	val_0	val_0	val_0
+0	val_0	val_0	val_0	val_0	val_0	val_0	val_0	val_0
+0	val_0	val_0	val_0	val_0	val_0	val_0	val_0	val_0
+10	val_10	val_10	val_10	val_10	val_10	val_10	val_10	val_10
+100	val_100	val_100	val_100	val_100	val_100	val_100	val_100	val_100
+100	val_100	val_100	val_100	val_100	val_100	val_100	val_100	val_100
+103	val_103	val_103	val_103	val_103	val_103	val_103	val_103	val_103
+103	val_103	val_103	val_103	val_103	val_103	val_103	val_103	val_103
+104	val_104	val_104	val_104	val_104	val_104	val_104	val_104	val_104
+104	val_104	val_104	val_104	val_104	val_104	val_104	val_104	val_104
+105	val_105	val_105	val_105	val_105	val_105	val_105	val_105	val_105
+11	val_11	val_11	val_11	val_11	val_11	val_11	val_11	val_11
+111	val_111	val_111	val_111	val_111	val_111	val_111	val_111	val_111
+113	val_113	val_113	val_113	val_113	val_113	val_113	val_113	val_113
+113	val_113	val_113	val_113	val_113	val_113	val_113	val_113	val_113
+114	val_114	val_114	val_114	val_114	val_114	val_114	val_114	val_114
+116	val_116	val_116	val_116	val_116	val_116	val_116	val_116	val_116
+118	val_118	val_118	val_118	val_118	val_118	val_118	val_118	val_118
+118	val_118	val_118	val_118	val_118	val_118	val_118	val_118	val_118
+119	val_119	val_119	val_119	val_119	val_119	val_119	val_119	val_119
+119	val_119	val_119	val_119	val_119	val_119	val_119	val_119	val_119
+119	val_119	val_119	val_119	val_119	val_119	val_119	val_119	val_119
+12	val_12	val_12	val_12	val_12	val_12	val_12	val_12	val_12
+12	val_12	val_12	val_12	val_12	val_12	val_12	val_12	val_12
+120	val_120	val_120	val_120	val_120	val_120	val_120	val_120	val_120
+120	val_120	val_120	val_120	val_120	val_120	val_120	val_120	val_120
+125	val_125	val_125	val_125	val_125	val_125	val_125	val_125	val_125
+125	val_125	val_125	val_125	val_125	val_125	val_125	val_125	val_125
+126	val_126	val_126	val_126	val_126	val_126	val_126	val_126	val_126
+128	val_128	val_128	val_128	val_128	val_128	val_128	val_128	val_128
+128	val_128	val_128	val_128	val_128	val_128	val_128	val_128	val_128
+128	val_128	val_128	val_128	val_128	val_128	val_128	val_128	val_128
+129	val_129	val_129	val_129	val_129	val_129	val_129	val_129	val_129
+129	val_129	val_129	val_129	val_129	val_129	val_129	val_129	val_129
+131	val_131	val_131	val_131	val_131	val_131	val_131	val_131	val_131
+133	val_133	val_133	val_133	val_133	val_133	val_133	val_133	val_133
+134	val_134	val_134	val_134	val_134	val_134	val_134	val_134	val_134
+134	val_134	val_134	val_134	val_134	val_134	val_134	val_134	val_134
+136	val_136	val_136	val_136	val_136	val_136	val_136	val_136	val_136
+137	val_137	val_137	val_137	val_137	val_137	val_137	val_137	val_137
+137	val_137	val_137	val_137	val_137	val_137	val_137	val_137	val_137
+138	val_138	val_138	val_138	val_138	val_138	val_138	val_138	val_138
+138	val_138	val_138	val_138	val_138	val_138	val_138	val_138	val_138
+138	val_138	val_138	val_138	val_138	val_138	val_138	val_138	val_138
+138	val_138	val_138	val_138	val_138	val_138	val_138	val_138	val_138
+143	val_143	val_143	val_143	val_143	val_143	val_143	val_143	val_143
+145	val_145	val_145	val_145	val_145	val_145	val_145	val_145	val_145
+146	val_146	val_146	val_146	val_146	val_146	val_146	val_146	val_146
+146	val_146	val_146	val_146	val_146	val_146	val_146	val_146	val_146
+149	val_149	val_149	val_149	val_149	val_149	val_149	val_149	val_149
+149	val_149	val_149	val_149	val_149	val_149	val_149	val_149	val_149
+15	val_15	val_15	val_15	val_15	val_15	val_15	val_15	val_15
+15	val_15	val_15	val_15	val_15	val_15	val_15	val_15	val_15
+150	val_150	val_150	val_150	val_150	val_150	val_150	val_150	val_150
+152	val_152	val_152	val_152	val_152	val_152	val_152	val_152	val_152
+152	val_152	val_152	val_152	val_152	val_152	val_152	val_152	val_152
+153	val_153	val_153	val_153	val_153	val_153	val_153	val_153	val_153
+155	val_155	val_155	val_155	val_155	val_155	val_155	val_155	val_155
+156	val_156	val_156	val_156	val_156	val_156	val_156	val_156	val_156
+157	val_157	val_157	val_157	val_157	val_157	val_157	val_157	val_157
+158	val_158	val_158	val_158	val_158	val_158	val_158	val_158	val_158
+160	val_160	val_160	val_160	val_160	val_160	val_160	val_160	val_160
+162	val_162	val_162	val_162	val_162	val_162	val_162	val_162	val_162
+163	val_163	val_163	val_163	val_163	val_163	val_163	val_163	val_163
+164	val_164	val_164	val_164	val_164	val_164	val_164	val_164	val_164
+164	val_164	val_164	val_164	val_164	val_164	val_164	val_164	val_164
+165	val_165	val_165	val_165	val_165	val_165	val_165	val_165	val_165
+165	val_165	val_165	val_165	val_165	val_165	val_165	val_165	val_165
+166	val_166	val_166	val_166	val_166	val_166	val_166	val_166	val_166
+167	val_167	val_167	val_167	val_167	val_167	val_167	val_167	val_167
+167	val_167	val_167	val_167	val_167	val_167	val_167	val_167	val_167
+167	val_167	val_167	val_167	val_167	val_167	val_167	val_167	val_167
+168	val_168	val_168	val_168	val_168	val_168	val_168	val_168	val_168
+169	val_169	val_169	val_169	val_169	val_169	val_169	val_169	val_169
+169	val_169	val_169	val_169	val_169	val_169	val_169	val_169	val_169
+169	val_169	val_169	val_169	val_169	val_169	val_169	val_169	val_169
+169	val_169	val_169	val_169	val_169	val_169	val_169	val_169	val_169
+17	val_17	val_17	val_17	val_17	val_17	val_17	val_17	val_17
+170	val_170	val_170	val_170	val_170	val_170	val_170	val_170	val_170
+172	val_172	val_172	val_172	val_172	val_172	val_172	val_172	val_172
+172	val_172	val_172	val_172	val_172	val_172	val_172	val_172	val_172
+174	val_174	val_174	val_174	val_174	val_174	val_174	val_174	val_174
+174	val_174	val_174	val_174	val_174	val_174	val_174	val_174	val_174
+175	val_175	val_175	val_175	val_175	val_175	val_175	val_175	val_175
+175	val_175	val_175	val_175	val_175	val_175	val_175	val_175	val_175
+176	val_176	val_176	val_176	val_176	val_176	val_176	val_176	val_176
+176	val_176	val_176	val_176	val_176	val_176	val_176	val_176	val_176
+177	val_177	val_177	val_177	val_177	val_177	val_177	val_177	val_177
+178	val_178	val_178	val_178	val_178	val_178	val_178	val_178	val_178
+179	val_179	val_179	val_179	val_179	val_179	val_179	val_179	val_179
+179	val_179	val_179	val_179	val_179	val_179	val_179	val_179	val_179
+18	val_18	val_18	val_18	val_18	val_18	val_18	val_18	val_18
+18	val_18	val_18	val_18	val_18	val_18	val_18	val_18	val_18
+180	val_180	val_180	val_180	val_180	val_180	val_180	val_180	val_180
+181	val_181	val_181	val_181	val_181	val_181	val_181	val_181	val_181
+183	val_183	val_183	val_183	val_183	val_183	val_183	val_183	val_183
+186	val_186	val_186	val_186	val_186	val_186	val_186	val_186	val_186
+187	val_187	val_187	val_187	val_187	val_187	val_187	val_187	val_187
+187	val_187	val_187	val_187	val_187	val_187	val_187	val_187	val_187
+187	val_187	val_187	val_187	val_187	val_187	val_187	val_187	val_187
+PREHOOK: query: -- flush for group-by
+explain
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
+PREHOOK: type: QUERY
+POSTHOOK: query: -- flush for group-by
+explain
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: concat(key, value, value, value, value, value, value, value, value, value) (type: string), key (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 96000 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: double)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: double)
+                    sort order: +
+                    Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                    TopN Hash Memory Usage: 2.0E-5
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: double)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 100
+                  Statistics: Num rows: 100 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 100 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 100
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0.0
+2.0
+4.0
+8.0
+9.0
+10.0
+11.0
+15.0
+17.0
+19.0
+20.0
+24.0
+27.0
+28.0
+30.0
+30.0
+33.0
+34.0
+36.0
+41.0
+43.0
+44.0
+47.0
+48.0
+52.0
+53.0
+54.0
+57.0
+64.0
+65.0
+66.0
+69.0
+74.0
+74.0
+77.0
+78.0
+80.0
+82.0
+84.0
+85.0
+86.0
+87.0
+92.0
+96.0
+102.0
+105.0
+105.0
+111.0
+114.0
+116.0
+116.0
+126.0
+131.0
+133.0
+134.0
+136.0
+143.0
+144.0
+145.0
+150.0
+152.0
+153.0
+155.0
+156.0
+157.0
+158.0
+160.0
+162.0
+163.0
+166.0
+166.0
+168.0
+168.0
+170.0
+177.0
+178.0
+180.0
+181.0
+183.0
+186.0
+189.0
+190.0
+190.0
+192.0
+194.0
+194.0
+196.0
+196.0
+200.0
+201.0
+202.0
+206.0
+208.0
+210.0
+214.0
+218.0
+222.0
+226.0
+226.0
+228.0


[19/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/multi_column_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/multi_column_in.q.out b/ql/src/test/results/clientpositive/llap/multi_column_in.q.out
new file mode 100644
index 0000000..91f018b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/multi_column_in.q.out
@@ -0,0 +1,365 @@
+PREHOOK: query: drop table emps
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table emps
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table emps (empno int, deptno int, empname string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@emps
+POSTHOOK: query: create table emps (empno int, deptno int, empname string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@emps
+PREHOOK: query: insert into table emps values (1,2,"11"),(1,2,"11"),(3,4,"33"),(1,3,"11"),(2,5,"22"),(2,5,"22")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@emps
+POSTHOOK: query: insert into table emps values (1,2,"11"),(1,2,"11"),(3,4,"33"),(1,3,"11"),(2,5,"22"),(2,5,"22")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@emps
+POSTHOOK: Lineage: emps.deptno EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: emps.empname SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: emps.empno EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select * from emps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+3	4	33
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where (int(empno+deptno/2), int(deptno/3)) in ((2,0),(3,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (int(empno+deptno/2), int(deptno/3)) in ((2,0),(3,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+PREHOOK: query: select * from emps where (int(empno+deptno/2), int(deptno/3)) not in ((2,0),(3,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (int(empno+deptno/2), int(deptno/3)) not in ((2,0),(3,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+3	4	33
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where (empno,deptno) in ((1,2),(3,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (empno,deptno) in ((1,2),(3,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+PREHOOK: query: select * from emps where (empno,deptno) not in ((1,2),(3,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (empno,deptno) not in ((1,2),(3,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+3	4	33
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where (empno,deptno) in ((1,2),(1,3))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (empno,deptno) in ((1,2),(1,3))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+1	3	11
+PREHOOK: query: select * from emps where (empno,deptno) not in ((1,2),(1,3))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (empno,deptno) not in ((1,2),(1,3))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+3	4	33
+2	5	22
+2	5	22
+PREHOOK: query: explain
+select * from emps where (empno+1,deptno) in ((1,2),(3,2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from emps where (empno+1,deptno) in ((1,2),(3,2))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: emps
+          Filter Operator
+            predicate: (struct((empno + 1),deptno)) IN (const struct(1,2), const struct(3,2)) (type: boolean)
+            Select Operator
+              expressions: empno (type: int), deptno (type: int), empname (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              ListSink
+
+PREHOOK: query: explain 
+select * from emps where (empno+1,deptno) not in ((1,2),(3,2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select * from emps where (empno+1,deptno) not in ((1,2),(3,2))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: emps
+          Filter Operator
+            predicate: (not (struct((empno + 1),deptno)) IN (const struct(1,2), const struct(3,2))) (type: boolean)
+            Select Operator
+              expressions: empno (type: int), deptno (type: int), empname (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              ListSink
+
+PREHOOK: query: select * from emps where empno in (1,2)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where empno in (1,2)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where empno in (1,2) and deptno > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where empno in (1,2) and deptno > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where (empno) in (1,2) and deptno > 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (empno) in (1,2) and deptno > 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where ((empno) in (1,2) and deptno > 2)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno) in (1,2) and deptno > 2)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: explain select * from emps where ((empno*2)|1,deptno) in ((empno+1,2),(empno+2,2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from emps where ((empno*2)|1,deptno) in ((empno+1,2),(empno+2,2))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: emps
+          Filter Operator
+            predicate: (struct(((empno * 2) | 1),deptno)) IN (struct((empno + 1),2), struct((empno + 2),2)) (type: boolean)
+            Select Operator
+              expressions: empno (type: int), deptno (type: int), empname (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              ListSink
+
+PREHOOK: query: select * from emps where ((empno*2)|1,deptno) in ((empno+1,2),(empno+2,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,deptno) in ((empno+1,2),(empno+2,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+PREHOOK: query: select (empno*2)|1,substr(empname,1,1) from emps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select (empno*2)|1,substr(empname,1,1) from emps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+3	1
+3	1
+7	3
+3	1
+5	2
+5	2
+PREHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+2,'2'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+2,'2'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+PREHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+1,'2'),(empno+2,'2'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+1,'2'),(empno+2,'2'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+3	4	33
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+1,'2'),(empno+3,'2'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+1,'2'),(empno+3,'2'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+3	4	33
+1	3	11
+PREHOOK: query: select sum(empno), empname from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2'))
+group by empname
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(empno), empname from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2'))
+group by empname
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+4	22
+PREHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2'))
+union
+select * from emps where (empno,deptno) in ((1,2),(3,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2'))
+union
+select * from emps where (empno,deptno) in ((1,2),(3,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+2	5	22
+PREHOOK: query: drop view v
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: drop view v
+POSTHOOK: type: DROPVIEW
+PREHOOK: query: create view v as 
+select * from(
+select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2'))
+union
+select * from emps where (empno,deptno) in ((1,2),(3,2)))subq order by empno desc
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@emps
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v
+POSTHOOK: query: create view v as 
+select * from(
+select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2'))
+union
+select * from emps where (empno,deptno) in ((1,2),(3,2)))subq order by empno desc
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@emps
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v
+PREHOOK: query: select * from v
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+PREHOOK: Input: default@v
+#### A masked pattern was here ####
+POSTHOOK: query: select * from v
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+POSTHOOK: Input: default@v
+#### A masked pattern was here ####
+2	5	22
+1	2	11
+PREHOOK: query: select subq.e1 from 
+(select (empno*2)|1 as e1, substr(empname,1,1) as n1 from emps)subq
+join
+(select empno as e2 from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')))subq2
+on e1=e2+1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select subq.e1 from 
+(select (empno*2)|1 as e1, substr(empname,1,1) as n1 from emps)subq
+join
+(select empno as e2 from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+1,'2'),(empno+3,'2')))subq2
+on e1=e2+1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+3
+3
+3
+3
+3
+3

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/multi_column_in_single.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/multi_column_in_single.q.out b/ql/src/test/results/clientpositive/llap/multi_column_in_single.q.out
new file mode 100644
index 0000000..3de09b1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/multi_column_in_single.q.out
@@ -0,0 +1,327 @@
+PREHOOK: query: select * from src where (key, value) in (('238','val_238'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src where (key, value) in (('238','val_238'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+238	val_238
+238	val_238
+PREHOOK: query: drop table emps
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table emps
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table emps (empno int, deptno int, empname string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@emps
+POSTHOOK: query: create table emps (empno int, deptno int, empname string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@emps
+PREHOOK: query: insert into table emps values (1,2,"11"),(1,2,"11"),(3,4,"33"),(1,3,"11"),(2,5,"22"),(2,5,"22")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@emps
+POSTHOOK: query: insert into table emps values (1,2,"11"),(1,2,"11"),(3,4,"33"),(1,3,"11"),(2,5,"22"),(2,5,"22")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@emps
+POSTHOOK: Lineage: emps.deptno EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: emps.empname SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: emps.empno EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select * from emps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+3	4	33
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where (int(empno+deptno/2), int(deptno/3)) in ((3,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (int(empno+deptno/2), int(deptno/3)) in ((3,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+PREHOOK: query: select * from emps where (int(empno+deptno/2), int(deptno/3)) not in ((3,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (int(empno+deptno/2), int(deptno/3)) not in ((3,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+3	4	33
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where (empno,deptno) in ((3,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (empno,deptno) in ((3,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+PREHOOK: query: select * from emps where (empno,deptno) not in ((3,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (empno,deptno) not in ((3,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+3	4	33
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where (empno,deptno) in ((1,3))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (empno,deptno) in ((1,3))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	3	11
+PREHOOK: query: select * from emps where (empno,deptno) not in ((1,3))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where (empno,deptno) not in ((1,3))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+3	4	33
+2	5	22
+2	5	22
+PREHOOK: query: explain
+select * from emps where (empno+1,deptno) in ((3,2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from emps where (empno+1,deptno) in ((3,2))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: emps
+          Filter Operator
+            predicate: (struct((empno + 1),deptno)) IN (const struct(3,2)) (type: boolean)
+            Select Operator
+              expressions: empno (type: int), deptno (type: int), empname (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              ListSink
+
+PREHOOK: query: explain 
+select * from emps where (empno+1,deptno) not in ((3,2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select * from emps where (empno+1,deptno) not in ((3,2))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: emps
+          Filter Operator
+            predicate: (not (struct((empno + 1),deptno)) IN (const struct(3,2))) (type: boolean)
+            Select Operator
+              expressions: empno (type: int), deptno (type: int), empname (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              ListSink
+
+PREHOOK: query: explain select * from emps where ((empno*2)|1,deptno) in ((empno+2,2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from emps where ((empno*2)|1,deptno) in ((empno+2,2))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: emps
+          Filter Operator
+            predicate: (struct(((empno * 2) | 1),deptno)) IN (struct((empno + 2),2)) (type: boolean)
+            Select Operator
+              expressions: empno (type: int), deptno (type: int), empname (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              ListSink
+
+PREHOOK: query: select * from emps where ((empno*2)|1,deptno) in ((empno+2,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,deptno) in ((empno+2,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+PREHOOK: query: select (empno*2)|1,substr(empname,1,1) from emps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select (empno*2)|1,substr(empname,1,1) from emps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+3	1
+3	1
+7	3
+3	1
+5	2
+5	2
+PREHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+2,'2'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+2,'2'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+PREHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+2,'2'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+2,'2'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+3	4	33
+1	3	11
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+2	5	22
+2	5	22
+PREHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+3,'2'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) not in ((empno+3,'2'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+1	2	11
+1	2	11
+3	4	33
+1	3	11
+PREHOOK: query: select sum(empno), empname from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2'))
+group by empname
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(empno), empname from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2'))
+group by empname
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+4	22
+PREHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2'))
+union
+select * from emps where (empno,deptno) in ((3,2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2'))
+union
+select * from emps where (empno,deptno) in ((3,2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+2	5	22
+PREHOOK: query: drop view v
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: drop view v
+POSTHOOK: type: DROPVIEW
+PREHOOK: query: create view v as 
+select * from(
+select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2'))
+union
+select * from emps where (empno,deptno) in ((3,2)))subq order by empno desc
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@emps
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v
+POSTHOOK: query: create view v as 
+select * from(
+select * from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2'))
+union
+select * from emps where (empno,deptno) in ((3,2)))subq order by empno desc
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@emps
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v
+PREHOOK: query: select * from v
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+PREHOOK: Input: default@v
+#### A masked pattern was here ####
+POSTHOOK: query: select * from v
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+POSTHOOK: Input: default@v
+#### A masked pattern was here ####
+2	5	22
+PREHOOK: query: select subq.e1 from 
+(select (empno*2)|1 as e1, substr(empname,1,1) as n1 from emps)subq
+join
+(select empno as e2 from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')))subq2
+on e1=e2+1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: select subq.e1 from 
+(select (empno*2)|1 as e1, substr(empname,1,1) as n1 from emps)subq
+join
+(select empno as e2 from emps where ((empno*2)|1,substr(empname,1,1)) in ((empno+3,'2')))subq2
+on e1=e2+1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+3
+3
+3
+3
+3
+3


[05/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/smb_mapjoin_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_4.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_4.q.out
new file mode 100644
index 0000000..4dd7f4b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_4.q.out
@@ -0,0 +1,1433 @@
+PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket_1
+POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket_1
+PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket_2
+POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket_2
+PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket_3
+POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket_3
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_bucket_1
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_bucket_1
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_bucket_2
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_bucket_2
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_bucket_3
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_bucket_3
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                     Left Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+1	val_1	NULL	NULL	NULL	NULL
+10	val_10	NULL	NULL	NULL	NULL
+3	val_3	NULL	NULL	NULL	NULL
+4	val_4	NULL	NULL	NULL	NULL
+5	val_5	NULL	NULL	NULL	NULL
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                     Right Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	20	val_20
+NULL	NULL	NULL	NULL	23	val_23
+NULL	NULL	NULL	NULL	4	val_4
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                     Outer Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+1	val_1	NULL	NULL	NULL	NULL
+10	val_10	NULL	NULL	NULL	NULL
+3	val_3	NULL	NULL	NULL	NULL
+4	val_4	NULL	NULL	NULL	NULL
+5	val_5	NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	20	val_20
+NULL	NULL	NULL	NULL	23	val_23
+NULL	NULL	NULL	NULL	4	val_4
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                     Left Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	25	val_25	NULL	NULL
+NULL	NULL	30	val_30	NULL	NULL
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                     Right Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	4	val_4
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                     Outer Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	25	val_25	NULL	NULL
+NULL	NULL	30	val_30	NULL	NULL
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	4	val_4
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Left Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+1	val_1	NULL	NULL	NULL	NULL
+10	val_10	NULL	NULL	NULL	NULL
+3	val_3	NULL	NULL	NULL	NULL
+4	val_4	NULL	NULL	NULL	NULL
+5	val_5	NULL	NULL	NULL	NULL
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	25	val_25	NULL	NULL
+NULL	NULL	30	val_30	NULL	NULL
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Right Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	4	val_4
+PREHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Outer Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,b)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+1	val_1	NULL	NULL	NULL	NULL
+10	val_10	NULL	NULL	NULL	NULL
+3	val_3	NULL	NULL	NULL	NULL
+4	val_4	NULL	NULL	NULL	NULL
+5	val_5	NULL	NULL	NULL	NULL
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	25	val_25	NULL	NULL
+NULL	NULL	30	val_30	NULL	NULL
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	4	val_4


[27/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/columnStatsUpdateForStatsOptimizer_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/columnStatsUpdateForStatsOptimizer_1.q.out b/ql/src/test/results/clientpositive/llap/columnStatsUpdateForStatsOptimizer_1.q.out
new file mode 100644
index 0000000..3facce7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/columnStatsUpdateForStatsOptimizer_1.q.out
@@ -0,0 +1,1029 @@
+PREHOOK: query: drop table calendar
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table calendar
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE calendar (year int, month int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@calendar
+POSTHOOK: query: CREATE TABLE calendar (year int, month int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@calendar
+PREHOOK: query: insert into calendar values (2010, 10), (2011, 11), (2012, 12)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@calendar
+POSTHOOK: query: insert into calendar values (2010, 10), (2011, 11), (2012, 12)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@calendar
+POSTHOOK: Lineage: calendar.month EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: calendar.year EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted calendar
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@calendar
+POSTHOOK: query: desc formatted calendar
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@calendar
+# col_name            	data_type           	comment             
+	 	 
+year                	int                 	                    
+month               	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	3                   
+	rawDataSize         	21                  
+	totalSize           	24                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: analyze table calendar compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+PREHOOK: Output: default@calendar
+POSTHOOK: query: analyze table calendar compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+POSTHOOK: Output: default@calendar
+PREHOOK: query: desc formatted calendar
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@calendar
+POSTHOOK: query: desc formatted calendar
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@calendar
+# col_name            	data_type           	comment             
+	 	 
+year                	int                 	                    
+month               	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	3                   
+	rawDataSize         	21                  
+	totalSize           	24                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select count(1) from calendar
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(1) from calendar
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select max(year) from calendar
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(year) from calendar
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: calendar
+                  Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: year (type: int)
+                    outputColumnNames: year
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: max(year)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: int)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: max(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select max(year) from calendar
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: select max(year) from calendar
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+2012
+PREHOOK: query: select max(month) from calendar
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: select max(month) from calendar
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+12
+PREHOOK: query: analyze table calendar compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table calendar compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted calendar
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@calendar
+POSTHOOK: query: desc formatted calendar
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@calendar
+# col_name            	data_type           	comment             
+	 	 
+year                	int                 	                    
+month               	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"month\":\"true\",\"year\":\"true\"}}
+	numFiles            	1                   
+	numRows             	3                   
+	rawDataSize         	21                  
+	totalSize           	24                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select max(year) from calendar
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(year) from calendar
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select max(year) from calendar
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: select max(year) from calendar
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+2012
+PREHOOK: query: insert into calendar values (2015, 15)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__2
+PREHOOK: Output: default@calendar
+POSTHOOK: query: insert into calendar values (2015, 15)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__2
+POSTHOOK: Output: default@calendar
+POSTHOOK: Lineage: calendar.month EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: calendar.year EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted calendar
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@calendar
+POSTHOOK: query: desc formatted calendar
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@calendar
+# col_name            	data_type           	comment             
+	 	 
+year                	int                 	                    
+month               	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numRows             	4                   
+	rawDataSize         	28                  
+	totalSize           	32                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select max(year) from calendar
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(year) from calendar
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: calendar
+                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: year (type: int)
+                    outputColumnNames: year
+                    Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: max(year)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: int)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: max(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select max(year) from calendar
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: select max(year) from calendar
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+2015
+PREHOOK: query: explain select max(month) from calendar
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(month) from calendar
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: calendar
+                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: month (type: int)
+                    outputColumnNames: month
+                    Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: max(month)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: int)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: max(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select max(month) from calendar
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: select max(month) from calendar
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+15
+PREHOOK: query: analyze table calendar compute statistics for columns year
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table calendar compute statistics for columns year
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted calendar
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@calendar
+POSTHOOK: query: desc formatted calendar
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@calendar
+# col_name            	data_type           	comment             
+	 	 
+year                	int                 	                    
+month               	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"year\":\"true\"}}
+	numFiles            	2                   
+	numRows             	4                   
+	rawDataSize         	28                  
+	totalSize           	32                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select max(year) from calendar
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(year) from calendar
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select max(year) from calendar
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: select max(year) from calendar
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+2015
+PREHOOK: query: explain select max(month) from calendar
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(month) from calendar
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: calendar
+                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: month (type: int)
+                    outputColumnNames: month
+                    Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: max(month)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: int)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: max(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select max(month) from calendar
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: select max(month) from calendar
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+15
+PREHOOK: query: analyze table calendar compute statistics for columns month
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table calendar compute statistics for columns month
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted calendar
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@calendar
+POSTHOOK: query: desc formatted calendar
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@calendar
+# col_name            	data_type           	comment             
+	 	 
+year                	int                 	                    
+month               	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"month\":\"true\",\"year\":\"true\"}}
+	numFiles            	2                   
+	numRows             	4                   
+	rawDataSize         	28                  
+	totalSize           	32                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select max(month) from calendar
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(month) from calendar
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select max(month) from calendar
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendar
+#### A masked pattern was here ####
+POSTHOOK: query: select max(month) from calendar
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendar
+#### A masked pattern was here ####
+15
+PREHOOK: query: CREATE TABLE calendarp (`year` int)  partitioned by (p int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@calendarp
+POSTHOOK: query: CREATE TABLE calendarp (`year` int)  partitioned by (p int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@calendarp
+PREHOOK: query: insert into table calendarp partition (p=1) values (2010), (2011), (2012)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__3
+PREHOOK: Output: default@calendarp@p=1
+POSTHOOK: query: insert into table calendarp partition (p=1) values (2010), (2011), (2012)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__3
+POSTHOOK: Output: default@calendarp@p=1
+POSTHOOK: Lineage: calendarp PARTITION(p=1).year EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted calendarp partition (p=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@calendarp
+POSTHOOK: query: desc formatted calendarp partition (p=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@calendarp
+# col_name            	data_type           	comment             
+	 	 
+year                	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+p                   	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	calendarp           	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	3                   
+	rawDataSize         	12                  
+	totalSize           	15                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select max(year) from calendarp where p=1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(year) from calendarp where p=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: calendarp
+                  Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: year (type: int)
+                    outputColumnNames: year
+                    Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: max(year)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: int)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: max(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select max(year) from calendarp where p=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendarp
+PREHOOK: Input: default@calendarp@p=1
+#### A masked pattern was here ####
+POSTHOOK: query: select max(year) from calendarp where p=1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendarp
+POSTHOOK: Input: default@calendarp@p=1
+#### A masked pattern was here ####
+2012
+PREHOOK: query: analyze table calendarp partition (p=1) compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendarp
+PREHOOK: Input: default@calendarp@p=1
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table calendarp partition (p=1) compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendarp
+POSTHOOK: Input: default@calendarp@p=1
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted calendarp partition (p=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@calendarp
+POSTHOOK: query: desc formatted calendarp partition (p=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@calendarp
+# col_name            	data_type           	comment             
+	 	 
+year                	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+p                   	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	calendarp           	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"year\":\"true\"}}
+	numFiles            	1                   
+	numRows             	3                   
+	rawDataSize         	12                  
+	totalSize           	15                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select max(year) from calendarp where p=1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(year) from calendarp where p=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: insert into table calendarp partition (p=1) values (2015)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__4
+PREHOOK: Output: default@calendarp@p=1
+POSTHOOK: query: insert into table calendarp partition (p=1) values (2015)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__4
+POSTHOOK: Output: default@calendarp@p=1
+POSTHOOK: Lineage: calendarp PARTITION(p=1).year EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: desc formatted calendarp partition (p=1)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@calendarp
+POSTHOOK: query: desc formatted calendarp partition (p=1)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@calendarp
+# col_name            	data_type           	comment             
+	 	 
+year                	int                 	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+p                   	int                 	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[1]                 	 
+Database:           	default             	 
+Table:              	calendarp           	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numRows             	4                   
+	rawDataSize         	16                  
+	totalSize           	20                  
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain select max(year) from calendarp where p=1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select max(year) from calendarp where p=1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: calendarp
+                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: year (type: int)
+                    outputColumnNames: year
+                    Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: max(year)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: int)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: max(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select max(year) from calendarp where p=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@calendarp
+PREHOOK: Input: default@calendarp@p=1
+#### A masked pattern was here ####
+POSTHOOK: query: select max(year) from calendarp where p=1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@calendarp
+POSTHOOK: Input: default@calendarp@p=1
+#### A masked pattern was here ####
+2015
+PREHOOK: query: create table t (key string, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t
+POSTHOOK: query: create table t (key string, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t
+PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t
+POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table t
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t
+PREHOOK: query: desc formatted t
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: desc formatted t
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@t
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	numFiles            	1                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: analyze table t compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+PREHOOK: Output: default@t
+POSTHOOK: query: analyze table t compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+POSTHOOK: Output: default@t
+PREHOOK: query: desc formatted t
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: desc formatted t
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@t
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/column_access_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/column_access_stats.q.out b/ql/src/test/results/clientpositive/llap/column_access_stats.q.out
new file mode 100644
index 0000000..5a66b0d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/column_access_stats.q.out
@@ -0,0 +1,938 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+-- This test is used for testing the ColumnAccessAnalyzer
+
+CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T2
+PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T3
+PREHOOK: query: CREATE TABLE T4(key STRING, val STRING) PARTITIONED BY (p STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T4
+PREHOOK: query: -- Simple select queries
+SELECT key FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+1
+2
+3
+7
+8
+8
+PREHOOK: query: SELECT key, val FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+1	11
+2	12
+3	13
+7	17
+8	18
+8	28
+PREHOOK: query: SELECT 1 FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+1
+1
+1
+1
+1
+1
+PREHOOK: query: SELECT key, val from T4 where p=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+Table:default@t4
+Columns:key,p,val
+
+PREHOOK: query: SELECT val FROM T4 where p=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+Table:default@t4
+Columns:p,val
+
+PREHOOK: query: SELECT p, val FROM T4 where p=1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+Table:default@t4
+Columns:p,val
+
+PREHOOK: query: -- More complicated select queries
+EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: t1
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            ListSink
+
+PREHOOK: query: SELECT key FROM (SELECT key, val FROM T1) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+1
+2
+3
+7
+8
+8
+PREHOOK: query: EXPLAIN SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: t1
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            ListSink
+
+PREHOOK: query: SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+1
+2
+3
+7
+8
+8
+PREHOOK: query: SELECT key + 1 as k FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+2.0
+3.0
+4.0
+8.0
+9.0
+9.0
+PREHOOK: query: SELECT key + val as k FROM T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+12.0
+14.0
+16.0
+24.0
+26.0
+36.0
+PREHOOK: query: -- Work with union
+EXPLAIN
+SELECT * FROM (
+SELECT key as c FROM T1
+ UNION ALL
+SELECT val as c FROM T1
+) subq1
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 3 <- Union 2 (CONTAINS)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: val (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: no inputs
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT * FROM (
+SELECT key as c FROM T1
+ UNION ALL
+SELECT val as c FROM T1
+) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+1
+11
+12
+13
+17
+18
+2
+28
+3
+7
+8
+8
+PREHOOK: query: EXPLAIN
+SELECT * FROM (
+SELECT key as c FROM T1
+ UNION ALL
+SELECT key as c FROM T1
+) subq1
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 3 <- Union 2 (CONTAINS)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: no inputs
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT * FROM (
+SELECT key as c FROM T1
+ UNION ALL
+SELECT key as c FROM T1
+) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+1
+1
+2
+2
+3
+3
+7
+7
+8
+8
+8
+8
+PREHOOK: query: -- Work with insert overwrite
+FROM T1
+INSERT OVERWRITE TABLE T2 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE T3 SELECT key, sum(val) GROUP BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t2
+PREHOOK: Output: default@t3
+Table:default@t1
+Columns:key,val
+
+PREHOOK: query: -- Simple joins
+SELECT *
+FROM T1 JOIN T2
+ON T1.key = T2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+Table:default@t2
+Columns:key,val
+
+1	11	1	1
+2	12	2	1
+3	13	3	1
+7	17	7	1
+8	18	8	2
+8	28	8	2
+PREHOOK: query: EXPLAIN
+SELECT T1.key
+FROM T1 JOIN T2
+ON T1.key = T2.key
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT T1.key
+FROM T1 JOIN T2
+ON T1.key = T2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+Table:default@t2
+Columns:key
+
+1
+2
+3
+7
+8
+8
+PREHOOK: query: SELECT *
+FROM T1 JOIN T2
+ON T1.key = T2.key AND T1.val = T2.val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+Table:default@t2
+Columns:key,val
+
+PREHOOK: query: -- Map join
+SELECT /*+ MAPJOIN(a) */ * 
+FROM T1 a JOIN T2 b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+Table:default@t2
+Columns:key,val
+
+1	11	1	1
+2	12	2	1
+3	13	3	1
+7	17	7	1
+8	18	8	2
+8	28	8	2
+PREHOOK: query: -- More joins
+EXPLAIN
+SELECT *
+FROM T1 JOIN T2
+ON T1.key = T2.key AND T1.val = 3 and T2.val = 3
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((UDFToDouble(val) = 3.0) and key is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), val (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((UDFToDouble(val) = 3.0) and key is not null) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), val (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT *
+FROM T1 JOIN T2
+ON T1.key = T2.key AND T1.val = 3 and T2.val = 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+Table:default@t2
+Columns:key,val
+
+PREHOOK: query: EXPLAIN
+SELECT subq1.val
+FROM 
+(
+  SELECT val FROM T1 WHERE key = 5  
+) subq1
+JOIN 
+(
+  SELECT val FROM T2 WHERE key = 6
+) subq2 
+ON subq1.val = subq2.val
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((UDFToDouble(key) = 5.0) and val is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: val (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((UDFToDouble(key) = 6.0) and val is not null) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: val (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT subq1.val
+FROM 
+(
+  SELECT val FROM T1 WHERE key = 5  
+) subq1
+JOIN 
+(
+  SELECT val FROM T2 WHERE key = 6
+) subq2 
+ON subq1.val = subq2.val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key,val
+
+Table:default@t2
+Columns:key,val
+
+PREHOOK: query: -- Join followed by join
+EXPLAIN
+SELECT *
+FROM
+(
+  SELECT subq1.key as key
+  FROM
+  (
+    SELECT key, val FROM T1
+  ) subq1
+  JOIN
+  (
+    SELECT key, 'teststring' as val FROM T2
+  ) subq2
+  ON subq1.key = subq2.key
+) T4
+JOIN T3
+ON T3.key = T4.key
+PREHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: t3
+                  Statistics: Num rows: 5 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 5 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), val (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 5 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 5 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 5 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 5 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT *
+FROM
+(
+  SELECT subq1.key as key
+  FROM
+  (
+    SELECT key, val FROM T1
+  ) subq1
+  JOIN
+  (
+    SELECT key, 'teststring' as val FROM T2
+  ) subq2
+  ON subq1.key = subq2.key
+) T4
+JOIN T3
+ON T3.key = T4.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+Table:default@t1
+Columns:key
+
+Table:default@t2
+Columns:key
+
+Table:default@t3
+Columns:key,val
+
+1	1	11.0
+2	2	12.0
+3	3	13.0
+7	7	17.0
+8	8	46.0
+8	8	46.0
+PREHOOK: query: -- for partitioned table
+SELECT * FROM srcpart TABLESAMPLE (10 ROWS)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+Table:default@srcpart
+Columns:ds,hr,key,value
+
+165	val_165	2008-04-08	11
+238	val_238	2008-04-08	11
+255	val_255	2008-04-08	11
+27	val_27	2008-04-08	11
+278	val_278	2008-04-08	11
+311	val_311	2008-04-08	11
+409	val_409	2008-04-08	11
+484	val_484	2008-04-08	11
+86	val_86	2008-04-08	11
+98	val_98	2008-04-08	11
+PREHOOK: query: SELECT key,ds FROM srcpart TABLESAMPLE (10 ROWS) WHERE hr='11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+#### A masked pattern was here ####
+Table:default@srcpart
+Columns:ds,hr,key
+
+165	2008-04-08
+238	2008-04-08
+255	2008-04-08
+27	2008-04-08
+278	2008-04-08
+311	2008-04-08
+409	2008-04-08
+484	2008-04-08
+86	2008-04-08
+98	2008-04-08
+PREHOOK: query: SELECT value FROM srcpart TABLESAMPLE (10 ROWS) WHERE ds='2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+Table:default@srcpart
+Columns:ds,value
+
+val_165
+val_238
+val_255
+val_27
+val_278
+val_311
+val_409
+val_484
+val_86
+val_98


[24/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/database.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/database.q.out b/ql/src/test/results/clientpositive/llap/database.q.out
new file mode 100644
index 0000000..8c2653c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/database.q.out
@@ -0,0 +1,1484 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- CREATE with comment
+CREATE DATABASE test_db COMMENT 'Hive test database'
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:test_db
+POSTHOOK: query: -- CREATE with comment
+CREATE DATABASE test_db COMMENT 'Hive test database'
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:test_db
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- CREATE INE already exists
+CREATE DATABASE IF NOT EXISTS test_db
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:test_db
+POSTHOOK: query: -- CREATE INE already exists
+CREATE DATABASE IF NOT EXISTS test_db
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:test_db
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- SHOW DATABASES synonym
+SHOW SCHEMAS
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: -- SHOW DATABASES synonym
+SHOW SCHEMAS
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- DROP
+DROP DATABASE test_db
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:test_db
+PREHOOK: Output: database:test_db
+POSTHOOK: query: -- DROP
+DROP DATABASE test_db
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:test_db
+POSTHOOK: Output: database:test_db
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- CREATE INE doesn't exist
+CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database'
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:test_db
+POSTHOOK: query: -- CREATE INE doesn't exist
+CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database'
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:test_db
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- DROP IE exists
+DROP DATABASE IF EXISTS test_db
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:test_db
+PREHOOK: Output: database:test_db
+POSTHOOK: query: -- DROP IE exists
+DROP DATABASE IF EXISTS test_db
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:test_db
+POSTHOOK: Output: database:test_db
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- DROP IE doesn't exist
+DROP DATABASE IF EXISTS test_db
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: -- DROP IE doesn't exist
+DROP DATABASE IF EXISTS test_db
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: -- SHOW
+CREATE DATABASE test_db
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:test_db
+POSTHOOK: query: -- SHOW
+CREATE DATABASE test_db
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:test_db
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- SHOW pattern
+SHOW DATABASES LIKE 'test*'
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: -- SHOW pattern
+SHOW DATABASES LIKE 'test*'
+POSTHOOK: type: SHOWDATABASES
+test_db
+PREHOOK: query: -- SHOW pattern
+SHOW DATABASES LIKE '*ef*'
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: -- SHOW pattern
+SHOW DATABASES LIKE '*ef*'
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: USE test_db
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:test_db
+POSTHOOK: query: USE test_db
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:test_db
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+test_db
+PREHOOK: query: -- CREATE table in non-default DB
+CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:test_db
+PREHOOK: Output: test_db@test_table
+POSTHOOK: query: -- CREATE table in non-default DB
+CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:test_db
+POSTHOOK: Output: test_db@test_table
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+test_table
+PREHOOK: query: -- DESCRIBE table in non-default DB
+DESCRIBE test_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: test_db@test_table
+POSTHOOK: query: -- DESCRIBE table in non-default DB
+DESCRIBE test_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: test_db@test_table
+col1                	string              	                    
+PREHOOK: query: -- DESCRIBE EXTENDED in non-default DB
+DESCRIBE EXTENDED test_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: test_db@test_table
+POSTHOOK: query: -- DESCRIBE EXTENDED in non-default DB
+DESCRIBE EXTENDED test_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: test_db@test_table
+col1                	string              	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: -- CREATE LIKE in non-default DB
+CREATE TABLE test_table_like LIKE test_table
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:test_db
+PREHOOK: Output: test_db@test_table_like
+POSTHOOK: query: -- CREATE LIKE in non-default DB
+CREATE TABLE test_table_like LIKE test_table
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:test_db
+POSTHOOK: Output: test_db@test_table_like
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+test_table
+test_table_like
+PREHOOK: query: DESCRIBE EXTENDED test_table_like
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: test_db@test_table_like
+POSTHOOK: query: DESCRIBE EXTENDED test_table_like
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: test_db@test_table_like
+col1                	string              	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: -- LOAD and SELECT
+LOAD DATA LOCAL INPATH '../../data/files/test.dat'
+OVERWRITE INTO TABLE test_table
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: test_db@test_table
+POSTHOOK: query: -- LOAD and SELECT
+LOAD DATA LOCAL INPATH '../../data/files/test.dat'
+OVERWRITE INTO TABLE test_table
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: test_db@test_table
+PREHOOK: query: SELECT * FROM test_table
+PREHOOK: type: QUERY
+PREHOOK: Input: test_db@test_table
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM test_table
+POSTHOOK: type: QUERY
+POSTHOOK: Input: test_db@test_table
+#### A masked pattern was here ####
+1
+2
+3
+4
+5
+6
+PREHOOK: query: -- DROP and CREATE w/o LOAD
+DROP TABLE test_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: test_db@test_table
+PREHOOK: Output: test_db@test_table
+POSTHOOK: query: -- DROP and CREATE w/o LOAD
+DROP TABLE test_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: test_db@test_table
+POSTHOOK: Output: test_db@test_table
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+test_table_like
+PREHOOK: query: CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:test_db
+PREHOOK: Output: test_db@test_table
+POSTHOOK: query: CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:test_db
+POSTHOOK: Output: test_db@test_table
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+test_table
+test_table_like
+PREHOOK: query: SELECT * FROM test_table
+PREHOOK: type: QUERY
+PREHOOK: Input: test_db@test_table
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM test_table
+POSTHOOK: type: QUERY
+POSTHOOK: Input: test_db@test_table
+#### A masked pattern was here ####
+PREHOOK: query: -- CREATE table that already exists in DEFAULT
+USE test_db
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:test_db
+POSTHOOK: query: -- CREATE table that already exists in DEFAULT
+USE test_db
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:test_db
+PREHOOK: query: CREATE TABLE src (col1 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:test_db
+PREHOOK: Output: test_db@src
+POSTHOOK: query: CREATE TABLE src (col1 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:test_db
+POSTHOOK: Output: test_db@src
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+src
+test_table
+test_table_like
+PREHOOK: query: SELECT * FROM src LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: test_db@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: test_db@src
+#### A masked pattern was here ####
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: SELECT * FROM src LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+165	val_165
+238	val_238
+255	val_255
+27	val_27
+278	val_278
+311	val_311
+409	val_409
+484	val_484
+86	val_86
+98	val_98
+PREHOOK: query: -- DROP DATABASE
+USE test_db
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:test_db
+POSTHOOK: query: -- DROP DATABASE
+USE test_db
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:test_db
+PREHOOK: query: DROP TABLE src
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: test_db@src
+PREHOOK: Output: test_db@src
+POSTHOOK: query: DROP TABLE src
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: test_db@src
+POSTHOOK: Output: test_db@src
+PREHOOK: query: DROP TABLE test_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: test_db@test_table
+PREHOOK: Output: test_db@test_table
+POSTHOOK: query: DROP TABLE test_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: test_db@test_table
+POSTHOOK: Output: test_db@test_table
+PREHOOK: query: DROP TABLE test_table_like
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: test_db@test_table_like
+PREHOOK: Output: test_db@test_table_like
+POSTHOOK: query: DROP TABLE test_table_like
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: test_db@test_table_like
+POSTHOOK: Output: test_db@test_table_like
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:test_db
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:test_db
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: DROP DATABASE test_db
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:test_db
+PREHOOK: Output: database:test_db
+POSTHOOK: query: DROP DATABASE test_db
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:test_db
+POSTHOOK: Output: database:test_db
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- DROP EMPTY DATABASE CASCADE
+CREATE DATABASE to_drop_db1
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:to_drop_db1
+POSTHOOK: query: -- DROP EMPTY DATABASE CASCADE
+CREATE DATABASE to_drop_db1
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:to_drop_db1
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+to_drop_db1
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: DROP DATABASE to_drop_db1 CASCADE
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:to_drop_db1
+PREHOOK: Output: database:to_drop_db1
+POSTHOOK: query: DROP DATABASE to_drop_db1 CASCADE
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:to_drop_db1
+POSTHOOK: Output: database:to_drop_db1
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- DROP NON-EMPTY DATABASE CASCADE
+CREATE DATABASE to_drop_db2
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:to_drop_db2
+POSTHOOK: query: -- DROP NON-EMPTY DATABASE CASCADE
+CREATE DATABASE to_drop_db2
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:to_drop_db2
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+to_drop_db2
+PREHOOK: query: USE to_drop_db2
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:to_drop_db2
+POSTHOOK: query: USE to_drop_db2
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:to_drop_db2
+PREHOOK: query: CREATE TABLE temp_tbl (c STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:to_drop_db2
+PREHOOK: Output: to_drop_db2@temp_tbl
+POSTHOOK: query: CREATE TABLE temp_tbl (c STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:to_drop_db2
+POSTHOOK: Output: to_drop_db2@temp_tbl
+PREHOOK: query: CREATE TABLE temp_tbl2 LIKE temp_tbl
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:to_drop_db2
+PREHOOK: Output: to_drop_db2@temp_tbl2
+POSTHOOK: query: CREATE TABLE temp_tbl2 LIKE temp_tbl
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:to_drop_db2
+POSTHOOK: Output: to_drop_db2@temp_tbl2
+PREHOOK: query: INSERT OVERWRITE TABLE temp_tbl2 SELECT COUNT(*) FROM temp_tbl
+PREHOOK: type: QUERY
+PREHOOK: Input: to_drop_db2@temp_tbl
+PREHOOK: Output: to_drop_db2@temp_tbl2
+POSTHOOK: query: INSERT OVERWRITE TABLE temp_tbl2 SELECT COUNT(*) FROM temp_tbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: to_drop_db2@temp_tbl
+POSTHOOK: Output: to_drop_db2@temp_tbl2
+POSTHOOK: Lineage: temp_tbl2.c EXPRESSION [(temp_tbl)temp_tbl.null, ]
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: DROP DATABASE to_drop_db2 CASCADE
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:to_drop_db2
+PREHOOK: Output: database:to_drop_db2
+PREHOOK: Output: to_drop_db2@temp_tbl
+PREHOOK: Output: to_drop_db2@temp_tbl2
+POSTHOOK: query: DROP DATABASE to_drop_db2 CASCADE
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:to_drop_db2
+POSTHOOK: Output: database:to_drop_db2
+POSTHOOK: Output: to_drop_db2@temp_tbl
+POSTHOOK: Output: to_drop_db2@temp_tbl2
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- DROP NON-EMPTY DATABASE CASCADE IF EXISTS
+CREATE DATABASE to_drop_db3
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:to_drop_db3
+POSTHOOK: query: -- DROP NON-EMPTY DATABASE CASCADE IF EXISTS
+CREATE DATABASE to_drop_db3
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:to_drop_db3
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+to_drop_db3
+PREHOOK: query: USE to_drop_db3
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:to_drop_db3
+POSTHOOK: query: USE to_drop_db3
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:to_drop_db3
+PREHOOK: query: CREATE TABLE temp_tbl (c STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:to_drop_db3
+PREHOOK: Output: to_drop_db3@temp_tbl
+POSTHOOK: query: CREATE TABLE temp_tbl (c STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:to_drop_db3
+POSTHOOK: Output: to_drop_db3@temp_tbl
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: DROP DATABASE IF EXISTS to_drop_db3 CASCADE
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:to_drop_db3
+PREHOOK: Output: database:to_drop_db3
+PREHOOK: Output: to_drop_db3@temp_tbl
+POSTHOOK: query: DROP DATABASE IF EXISTS to_drop_db3 CASCADE
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:to_drop_db3
+POSTHOOK: Output: database:to_drop_db3
+POSTHOOK: Output: to_drop_db3@temp_tbl
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- DROP NON-EXISTING DATABASE CASCADE IF EXISTS
+DROP DATABASE IF EXISTS non_exists_db3 CASCADE
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: -- DROP NON-EXISTING DATABASE CASCADE IF EXISTS
+DROP DATABASE IF EXISTS non_exists_db3 CASCADE
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: -- DROP NON-EXISTING DATABASE RESTRICT IF EXISTS
+DROP DATABASE IF EXISTS non_exists_db3 RESTRICT
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: -- DROP NON-EXISTING DATABASE RESTRICT IF EXISTS
+DROP DATABASE IF EXISTS non_exists_db3 RESTRICT
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: -- DROP EMPTY DATABASE RESTRICT
+CREATE DATABASE to_drop_db4
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:to_drop_db4
+POSTHOOK: query: -- DROP EMPTY DATABASE RESTRICT
+CREATE DATABASE to_drop_db4
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:to_drop_db4
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+to_drop_db4
+PREHOOK: query: DROP DATABASE to_drop_db4 RESTRICT
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:to_drop_db4
+PREHOOK: Output: database:to_drop_db4
+POSTHOOK: query: DROP DATABASE to_drop_db4 RESTRICT
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:to_drop_db4
+POSTHOOK: Output: database:to_drop_db4
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: --
+-- Canonical Name Tests
+--
+
+CREATE DATABASE db1
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:db1
+POSTHOOK: query: --
+-- Canonical Name Tests
+--
+
+CREATE DATABASE db1
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:db1
+PREHOOK: query: CREATE DATABASE db2
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:db2
+POSTHOOK: query: CREATE DATABASE db2
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:db2
+PREHOOK: query: -- CREATE foreign table
+CREATE TABLE db1.src(key STRING, value STRING)
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:db1
+PREHOOK: Output: db1@src
+POSTHOOK: query: -- CREATE foreign table
+CREATE TABLE db1.src(key STRING, value STRING)
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:db1
+POSTHOOK: Output: db1@src
+PREHOOK: query: -- LOAD into foreign table
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt'
+OVERWRITE INTO TABLE db1.src
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: db1@src
+POSTHOOK: query: -- LOAD into foreign table
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt'
+OVERWRITE INTO TABLE db1.src
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: db1@src
+PREHOOK: query: -- SELECT from foreign table
+SELECT * FROM db1.src
+PREHOOK: type: QUERY
+PREHOOK: Input: db1@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- SELECT from foreign table
+SELECT * FROM db1.src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: db1@src
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+100	val_100
+100	val_100
+103	val_103
+103	val_103
+104	val_104
+104	val_104
+105	val_105
+11	val_11
+111	val_111
+113	val_113
+113	val_113
+114	val_114
+116	val_116
+118	val_118
+118	val_118
+119	val_119
+119	val_119
+119	val_119
+12	val_12
+12	val_12
+120	val_120
+120	val_120
+125	val_125
+125	val_125
+126	val_126
+128	val_128
+128	val_128
+128	val_128
+129	val_129
+129	val_129
+131	val_131
+133	val_133
+134	val_134
+134	val_134
+136	val_136
+137	val_137
+137	val_137
+138	val_138
+138	val_138
+138	val_138
+138	val_138
+143	val_143
+145	val_145
+146	val_146
+146	val_146
+149	val_149
+149	val_149
+15	val_15
+15	val_15
+150	val_150
+152	val_152
+152	val_152
+153	val_153
+155	val_155
+156	val_156
+157	val_157
+158	val_158
+160	val_160
+162	val_162
+163	val_163
+164	val_164
+164	val_164
+165	val_165
+165	val_165
+166	val_166
+167	val_167
+167	val_167
+167	val_167
+168	val_168
+169	val_169
+169	val_169
+169	val_169
+169	val_169
+17	val_17
+170	val_170
+172	val_172
+172	val_172
+174	val_174
+174	val_174
+175	val_175
+175	val_175
+176	val_176
+176	val_176
+177	val_177
+178	val_178
+179	val_179
+179	val_179
+18	val_18
+18	val_18
+180	val_180
+181	val_181
+183	val_183
+186	val_186
+187	val_187
+187	val_187
+187	val_187
+189	val_189
+19	val_19
+190	val_190
+191	val_191
+191	val_191
+192	val_192
+193	val_193
+193	val_193
+193	val_193
+194	val_194
+195	val_195
+195	val_195
+196	val_196
+197	val_197
+197	val_197
+199	val_199
+199	val_199
+199	val_199
+2	val_2
+20	val_20
+200	val_200
+200	val_200
+201	val_201
+202	val_202
+203	val_203
+203	val_203
+205	val_205
+205	val_205
+207	val_207
+207	val_207
+208	val_208
+208	val_208
+208	val_208
+209	val_209
+209	val_209
+213	val_213
+213	val_213
+214	val_214
+216	val_216
+216	val_216
+217	val_217
+217	val_217
+218	val_218
+219	val_219
+219	val_219
+221	val_221
+221	val_221
+222	val_222
+223	val_223
+223	val_223
+224	val_224
+224	val_224
+226	val_226
+228	val_228
+229	val_229
+229	val_229
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+233	val_233
+233	val_233
+235	val_235
+237	val_237
+237	val_237
+238	val_238
+238	val_238
+239	val_239
+239	val_239
+24	val_24
+24	val_24
+241	val_241
+242	val_242
+242	val_242
+244	val_244
+247	val_247
+248	val_248
+249	val_249
+252	val_252
+255	val_255
+255	val_255
+256	val_256
+256	val_256
+257	val_257
+258	val_258
+26	val_26
+26	val_26
+260	val_260
+262	val_262
+263	val_263
+265	val_265
+265	val_265
+266	val_266
+27	val_27
+272	val_272
+272	val_272
+273	val_273
+273	val_273
+273	val_273
+274	val_274
+275	val_275
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+278	val_278
+278	val_278
+28	val_28
+280	val_280
+280	val_280
+281	val_281
+281	val_281
+282	val_282
+282	val_282
+283	val_283
+284	val_284
+285	val_285
+286	val_286
+287	val_287
+288	val_288
+288	val_288
+289	val_289
+291	val_291
+292	val_292
+296	val_296
+298	val_298
+298	val_298
+298	val_298
+30	val_30
+302	val_302
+305	val_305
+306	val_306
+307	val_307
+307	val_307
+308	val_308
+309	val_309
+309	val_309
+310	val_310
+311	val_311
+311	val_311
+311	val_311
+315	val_315
+316	val_316
+316	val_316
+316	val_316
+317	val_317
+317	val_317
+318	val_318
+318	val_318
+318	val_318
+321	val_321
+321	val_321
+322	val_322
+322	val_322
+323	val_323
+325	val_325
+325	val_325
+327	val_327
+327	val_327
+327	val_327
+33	val_33
+331	val_331
+331	val_331
+332	val_332
+333	val_333
+333	val_333
+335	val_335
+336	val_336
+338	val_338
+339	val_339
+34	val_34
+341	val_341
+342	val_342
+342	val_342
+344	val_344
+344	val_344
+345	val_345
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+35	val_35
+35	val_35
+35	val_35
+351	val_351
+353	val_353
+353	val_353
+356	val_356
+360	val_360
+362	val_362
+364	val_364
+365	val_365
+366	val_366
+367	val_367
+367	val_367
+368	val_368
+369	val_369
+369	val_369
+369	val_369
+37	val_37
+37	val_37
+373	val_373
+374	val_374
+375	val_375
+377	val_377
+378	val_378
+379	val_379
+382	val_382
+382	val_382
+384	val_384
+384	val_384
+384	val_384
+386	val_386
+389	val_389
+392	val_392
+393	val_393
+394	val_394
+395	val_395
+395	val_395
+396	val_396
+396	val_396
+396	val_396
+397	val_397
+397	val_397
+399	val_399
+399	val_399
+4	val_4
+400	val_400
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+402	val_402
+403	val_403
+403	val_403
+403	val_403
+404	val_404
+404	val_404
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+407	val_407
+409	val_409
+409	val_409
+409	val_409
+41	val_41
+411	val_411
+413	val_413
+413	val_413
+414	val_414
+414	val_414
+417	val_417
+417	val_417
+417	val_417
+418	val_418
+419	val_419
+42	val_42
+42	val_42
+421	val_421
+424	val_424
+424	val_424
+427	val_427
+429	val_429
+429	val_429
+43	val_43
+430	val_430
+430	val_430
+430	val_430
+431	val_431
+431	val_431
+431	val_431
+432	val_432
+435	val_435
+436	val_436
+437	val_437
+438	val_438
+438	val_438
+438	val_438
+439	val_439
+439	val_439
+44	val_44
+443	val_443
+444	val_444
+446	val_446
+448	val_448
+449	val_449
+452	val_452
+453	val_453
+454	val_454
+454	val_454
+454	val_454
+455	val_455
+457	val_457
+458	val_458
+458	val_458
+459	val_459
+459	val_459
+460	val_460
+462	val_462
+462	val_462
+463	val_463
+463	val_463
+466	val_466
+466	val_466
+466	val_466
+467	val_467
+468	val_468
+468	val_468
+468	val_468
+468	val_468
+469	val_469
+469	val_469
+469	val_469
+469	val_469
+469	val_469
+47	val_47
+470	val_470
+472	val_472
+475	val_475
+477	val_477
+478	val_478
+478	val_478
+479	val_479
+480	val_480
+480	val_480
+480	val_480
+481	val_481
+482	val_482
+483	val_483
+484	val_484
+485	val_485
+487	val_487
+489	val_489
+489	val_489
+489	val_489
+489	val_489
+490	val_490
+491	val_491
+492	val_492
+492	val_492
+493	val_493
+494	val_494
+495	val_495
+496	val_496
+497	val_497
+498	val_498
+498	val_498
+498	val_498
+5	val_5
+5	val_5
+5	val_5
+51	val_51
+51	val_51
+53	val_53
+54	val_54
+57	val_57
+58	val_58
+58	val_58
+64	val_64
+65	val_65
+66	val_66
+67	val_67
+67	val_67
+69	val_69
+70	val_70
+70	val_70
+70	val_70
+72	val_72
+72	val_72
+74	val_74
+76	val_76
+76	val_76
+77	val_77
+78	val_78
+8	val_8
+80	val_80
+82	val_82
+83	val_83
+83	val_83
+84	val_84
+84	val_84
+85	val_85
+86	val_86
+87	val_87
+9	val_9
+90	val_90
+90	val_90
+90	val_90
+92	val_92
+95	val_95
+95	val_95
+96	val_96
+97	val_97
+97	val_97
+98	val_98
+98	val_98
+PREHOOK: query: -- CREATE Partitioned foreign table
+CREATE TABLE db1.srcpart(key STRING, value STRING)
+PARTITIONED BY (ds STRING, hr STRING)
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:db1
+PREHOOK: Output: db1@srcpart
+POSTHOOK: query: -- CREATE Partitioned foreign table
+CREATE TABLE db1.srcpart(key STRING, value STRING)
+PARTITIONED BY (ds STRING, hr STRING)
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:db1
+POSTHOOK: Output: db1@srcpart
+PREHOOK: query: -- LOAD data into Partitioned foreign table
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt'
+OVERWRITE INTO TABLE db1.srcpart
+PARTITION (ds='2008-04-08', hr='11')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: db1@srcpart
+POSTHOOK: query: -- LOAD data into Partitioned foreign table
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt'
+OVERWRITE INTO TABLE db1.srcpart
+PARTITION (ds='2008-04-08', hr='11')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: db1@srcpart
+POSTHOOK: Output: db1@srcpart@ds=2008-04-08/hr=11
+PREHOOK: query: -- SELECT from Partitioned foreign table
+SELECT key, value FROM db1.srcpart
+WHERE key < 100 AND ds='2008-04-08' AND hr='11'
+PREHOOK: type: QUERY
+PREHOOK: Input: db1@srcpart
+PREHOOK: Input: db1@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: -- SELECT from Partitioned foreign table
+SELECT key, value FROM db1.srcpart
+WHERE key < 100 AND ds='2008-04-08' AND hr='11'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: db1@srcpart
+POSTHOOK: Input: db1@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+11	val_11
+12	val_12
+12	val_12
+15	val_15
+15	val_15
+17	val_17
+18	val_18
+18	val_18
+19	val_19
+2	val_2
+20	val_20
+24	val_24
+24	val_24
+26	val_26
+26	val_26
+27	val_27
+28	val_28
+30	val_30
+33	val_33
+34	val_34
+35	val_35
+35	val_35
+35	val_35
+37	val_37
+37	val_37
+4	val_4
+41	val_41
+42	val_42
+42	val_42
+43	val_43
+44	val_44
+47	val_47
+5	val_5
+5	val_5
+5	val_5
+51	val_51
+51	val_51
+53	val_53
+54	val_54
+57	val_57
+58	val_58
+58	val_58
+64	val_64
+65	val_65
+66	val_66
+67	val_67
+67	val_67
+69	val_69
+70	val_70
+70	val_70
+70	val_70
+72	val_72
+72	val_72
+74	val_74
+76	val_76
+76	val_76
+77	val_77
+78	val_78
+8	val_8
+80	val_80
+82	val_82
+83	val_83
+83	val_83
+84	val_84
+84	val_84
+85	val_85
+86	val_86
+87	val_87
+9	val_9
+90	val_90
+90	val_90
+90	val_90
+92	val_92
+95	val_95
+95	val_95
+96	val_96
+97	val_97
+97	val_97
+98	val_98
+98	val_98
+PREHOOK: query: -- SELECT JOINed product of two foreign tables
+USE db2
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:db2
+POSTHOOK: query: -- SELECT JOINed product of two foreign tables
+USE db2
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:db2
+PREHOOK: query: SELECT a.* FROM db1.src a JOIN default.src1 b
+ON (a.key = b.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: db1@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a.* FROM db1.src a JOIN default.src1 b
+ON (a.key = b.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: db1@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+128	val_128
+128	val_128
+128	val_128
+146	val_146
+146	val_146
+150	val_150
+213	val_213
+213	val_213
+224	val_224
+224	val_224
+238	val_238
+238	val_238
+255	val_255
+255	val_255
+273	val_273
+273	val_273
+273	val_273
+278	val_278
+278	val_278
+311	val_311
+311	val_311
+311	val_311
+369	val_369
+369	val_369
+369	val_369
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+66	val_66
+98	val_98
+98	val_98
+PREHOOK: query: -- CREATE TABLE AS SELECT from foreign table
+CREATE TABLE conflict_name AS
+SELECT value FROM default.src WHERE key = 66
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:db2
+PREHOOK: Output: db2@conflict_name
+POSTHOOK: query: -- CREATE TABLE AS SELECT from foreign table
+CREATE TABLE conflict_name AS
+SELECT value FROM default.src WHERE key = 66
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:db2
+POSTHOOK: Output: db2@conflict_name
+POSTHOOK: Lineage: conflict_name.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- CREATE foreign table
+CREATE TABLE db1.conflict_name AS
+SELECT value FROM db1.src WHERE key = 8
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: db1@src
+PREHOOK: Output: database:db1
+PREHOOK: Output: db1@conflict_name
+POSTHOOK: query: -- CREATE foreign table
+CREATE TABLE db1.conflict_name AS
+SELECT value FROM db1.src WHERE key = 8
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: db1@src
+POSTHOOK: Output: database:db1
+POSTHOOK: Output: db1@conflict_name
+POSTHOOK: Lineage: conflict_name.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: -- query tables with the same names in different DBs
+SELECT * FROM (
+  SELECT value FROM db1.conflict_name
+UNION ALL
+  SELECT value FROM conflict_name
+) subq ORDER BY value
+PREHOOK: type: QUERY
+PREHOOK: Input: db1@conflict_name
+PREHOOK: Input: db2@conflict_name
+#### A masked pattern was here ####
+POSTHOOK: query: -- query tables with the same names in different DBs
+SELECT * FROM (
+  SELECT value FROM db1.conflict_name
+UNION ALL
+  SELECT value FROM conflict_name
+) subq ORDER BY value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: db1@conflict_name
+POSTHOOK: Input: db2@conflict_name
+#### A masked pattern was here ####
+val_66
+val_8
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: SELECT * FROM (
+  SELECT value FROM db1.conflict_name
+UNION ALL
+  SELECT value FROM db2.conflict_name
+) subq
+PREHOOK: type: QUERY
+PREHOOK: Input: db1@conflict_name
+PREHOOK: Input: db2@conflict_name
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM (
+  SELECT value FROM db1.conflict_name
+UNION ALL
+  SELECT value FROM db2.conflict_name
+) subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: db1@conflict_name
+POSTHOOK: Input: db2@conflict_name
+#### A masked pattern was here ####
+val_66
+val_8
+PREHOOK: query: -- TABLESAMPLES
+CREATE TABLE bucketized_src (key INT, value STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketized_src
+POSTHOOK: query: -- TABLESAMPLES
+CREATE TABLE bucketized_src (key INT, value STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketized_src
+PREHOOK: query: INSERT OVERWRITE TABLE bucketized_src
+SELECT key, value FROM src WHERE key=66
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucketized_src
+POSTHOOK: query: INSERT OVERWRITE TABLE bucketized_src
+SELECT key, value FROM src WHERE key=66
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucketized_src
+POSTHOOK: Lineage: bucketized_src.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucketized_src.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT key FROM bucketized_src TABLESAMPLE(BUCKET 1 out of 1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketized_src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key FROM bucketized_src TABLESAMPLE(BUCKET 1 out of 1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketized_src
+#### A masked pattern was here ####
+66
+PREHOOK: query: -- CREATE TABLE LIKE
+CREATE TABLE db2.src1 LIKE default.src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:db2
+PREHOOK: Output: db2@src1
+POSTHOOK: query: -- CREATE TABLE LIKE
+CREATE TABLE db2.src1 LIKE default.src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:db2
+POSTHOOK: Output: db2@src1
+PREHOOK: query: USE db2
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:db2
+POSTHOOK: query: USE db2
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:db2
+PREHOOK: query: DESC EXTENDED src1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: db2@src1
+POSTHOOK: query: DESC EXTENDED src1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: db2@src1
+key                 	string              	default             
+value               	string              	default             
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: -- character escaping
+SELECT key FROM `default`.src ORDER BY key LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- character escaping
+SELECT key FROM `default`.src ORDER BY key LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0
+PREHOOK: query: SELECT key FROM `default`.`src` ORDER BY key LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key FROM `default`.`src` ORDER BY key LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0
+PREHOOK: query: SELECT key FROM default.`src` ORDER BY key LIMIT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key FROM default.`src` ORDER BY key LIMIT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0
+PREHOOK: query: USE default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: USE default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/drop_partition_with_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/drop_partition_with_stats.q.out b/ql/src/test/results/clientpositive/llap/drop_partition_with_stats.q.out
new file mode 100644
index 0000000..e27e557
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/drop_partition_with_stats.q.out
@@ -0,0 +1,496 @@
+PREHOOK: query: -- This test verifies that a table partition could be dropped with columns stats computed
+-- The column stats for a partitioned table will go to PART_COL_STATS
+CREATE DATABASE IF NOT EXISTS partstatsdb1
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:partstatsdb1
+POSTHOOK: query: -- This test verifies that a table partition could be dropped with columns stats computed
+-- The column stats for a partitioned table will go to PART_COL_STATS
+CREATE DATABASE IF NOT EXISTS partstatsdb1
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:partstatsdb1
+PREHOOK: query: USE partstatsdb1
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:partstatsdb1
+POSTHOOK: query: USE partstatsdb1
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:partstatsdb1
+PREHOOK: query: CREATE TABLE IF NOT EXISTS testtable (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:partstatsdb1
+PREHOOK: Output: partstatsdb1@testtable
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS testtable (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:partstatsdb1
+POSTHOOK: Output: partstatsdb1@testtable
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb1@testtable
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb1@testtable
+POSTHOOK: Output: partstatsdb1@testtable@part1=p11/part2=P12
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p21', Part2='P22')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb1@testtable
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p21', Part2='P22')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb1@testtable
+POSTHOOK: Output: partstatsdb1@testtable@part1=p21/part2=P22
+PREHOOK: query: ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb1@testtable
+PREHOOK: Input: partstatsdb1@testtable@part1=p11/part2=P12
+PREHOOK: Input: partstatsdb1@testtable@part1=p21/part2=P22
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb1@testtable
+POSTHOOK: Input: partstatsdb1@testtable@part1=p11/part2=P12
+POSTHOOK: Input: partstatsdb1@testtable@part1=p21/part2=P22
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE testtable PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb1@testtable
+PREHOOK: Input: partstatsdb1@testtable@part1=p11/part2=P12
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE testtable PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb1@testtable
+POSTHOOK: Input: partstatsdb1@testtable@part1=p11/part2=P12
+#### A masked pattern was here ####
+PREHOOK: query: CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:partstatsdb1
+PREHOOK: Output: partstatsdb1@TestTable1
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:partstatsdb1
+POSTHOOK: Output: partstatsdb1@TestTable1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P11')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb1@testtable1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P11')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb1@testtable1
+POSTHOOK: Output: partstatsdb1@testtable1@part1=p11/part2=P11
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb1@testtable1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb1@testtable1
+POSTHOOK: Output: partstatsdb1@testtable1@part1=p11/part2=P12
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p21', Part2='P22')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb1@testtable1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p21', Part2='P22')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb1@testtable1
+POSTHOOK: Output: partstatsdb1@testtable1@part1=p21/part2=P22
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p31', Part2='P32')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb1@testtable1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p31', Part2='P32')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb1@testtable1
+POSTHOOK: Output: partstatsdb1@testtable1@part1=p31/part2=P32
+PREHOOK: query: ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb1@testtable1
+PREHOOK: Input: partstatsdb1@testtable1@part1=p11/part2=P11
+PREHOOK: Input: partstatsdb1@testtable1@part1=p11/part2=P12
+PREHOOK: Input: partstatsdb1@testtable1@part1=p21/part2=P22
+PREHOOK: Input: partstatsdb1@testtable1@part1=p31/part2=P32
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb1@testtable1
+POSTHOOK: Input: partstatsdb1@testtable1@part1=p11/part2=P11
+POSTHOOK: Input: partstatsdb1@testtable1@part1=p11/part2=P12
+POSTHOOK: Input: partstatsdb1@testtable1@part1=p21/part2=P22
+POSTHOOK: Input: partstatsdb1@testtable1@part1=p31/part2=P32
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE TestTable1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb1@testtable1
+PREHOOK: Input: partstatsdb1@testtable1@part1=p11/part2=P11
+PREHOOK: Input: partstatsdb1@testtable1@part1=p11/part2=P12
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE TestTable1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb1@testtable1
+POSTHOOK: Input: partstatsdb1@testtable1@part1=p11/part2=P11
+POSTHOOK: Input: partstatsdb1@testtable1@part1=p11/part2=P12
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE TestTable1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb1@testtable1
+PREHOOK: Input: partstatsdb1@testtable1@part1=p11/part2=P12
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE TestTable1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb1@testtable1
+POSTHOOK: Input: partstatsdb1@testtable1@part1=p11/part2=P12
+#### A masked pattern was here ####
+PREHOOK: query: CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:partstatsdb1
+PREHOOK: Output: partstatsdb1@TESTTABLE2
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:partstatsdb1
+POSTHOOK: Output: partstatsdb1@TESTTABLE2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb1@testtable2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb1@testtable2
+POSTHOOK: Output: partstatsdb1@testtable2@part1=p11/part2=P12
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p21', Part2='P22')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb1@testtable2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p21', Part2='P22')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb1@testtable2
+POSTHOOK: Output: partstatsdb1@testtable2@part1=p21/part2=P22
+PREHOOK: query: ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb1@testtable2
+PREHOOK: Input: partstatsdb1@testtable2@part1=p11/part2=P12
+PREHOOK: Input: partstatsdb1@testtable2@part1=p21/part2=P22
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb1@testtable2
+POSTHOOK: Input: partstatsdb1@testtable2@part1=p11/part2=P12
+POSTHOOK: Input: partstatsdb1@testtable2@part1=p21/part2=P22
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb1@testtable2
+PREHOOK: Input: partstatsdb1@testtable2@part1=p11/part2=P12
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb1@testtable2
+POSTHOOK: Input: partstatsdb1@testtable2@part1=p11/part2=P12
+#### A masked pattern was here ####
+PREHOOK: query: ALTER TABLE partstatsdb1.testtable DROP PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: partstatsdb1@testtable
+PREHOOK: Output: partstatsdb1@testtable@part1=p11/part2=P12
+POSTHOOK: query: ALTER TABLE partstatsdb1.testtable DROP PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: partstatsdb1@testtable
+POSTHOOK: Output: partstatsdb1@testtable@part1=p11/part2=P12
+PREHOOK: query: ALTER TABLE partstatsdb1.TestTable1 DROP PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: partstatsdb1@testtable1
+PREHOOK: Output: partstatsdb1@testtable1@part1=p11/part2=P12
+POSTHOOK: query: ALTER TABLE partstatsdb1.TestTable1 DROP PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: partstatsdb1@testtable1
+POSTHOOK: Output: partstatsdb1@testtable1@part1=p11/part2=P12
+PREHOOK: query: ALTER TABLE partstatsdb1.TESTTABLE2 DROP PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: partstatsdb1@testtable2
+PREHOOK: Output: partstatsdb1@testtable2@part1=p11/part2=P12
+POSTHOOK: query: ALTER TABLE partstatsdb1.TESTTABLE2 DROP PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: partstatsdb1@testtable2
+POSTHOOK: Output: partstatsdb1@testtable2@part1=p11/part2=P12
+PREHOOK: query: DROP TABLE partstatsdb1.testtable
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: partstatsdb1@testtable
+PREHOOK: Output: partstatsdb1@testtable
+POSTHOOK: query: DROP TABLE partstatsdb1.testtable
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: partstatsdb1@testtable
+POSTHOOK: Output: partstatsdb1@testtable
+PREHOOK: query: DROP TABLE partstatsdb1.TestTable1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: partstatsdb1@testtable1
+PREHOOK: Output: partstatsdb1@testtable1
+POSTHOOK: query: DROP TABLE partstatsdb1.TestTable1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: partstatsdb1@testtable1
+POSTHOOK: Output: partstatsdb1@testtable1
+PREHOOK: query: DROP TABLE partstatsdb1.TESTTABLE2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: partstatsdb1@testtable2
+PREHOOK: Output: partstatsdb1@testtable2
+POSTHOOK: query: DROP TABLE partstatsdb1.TESTTABLE2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: partstatsdb1@testtable2
+POSTHOOK: Output: partstatsdb1@testtable2
+PREHOOK: query: DROP DATABASE partstatsdb1
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:partstatsdb1
+PREHOOK: Output: database:partstatsdb1
+POSTHOOK: query: DROP DATABASE partstatsdb1
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:partstatsdb1
+POSTHOOK: Output: database:partstatsdb1
+PREHOOK: query: CREATE DATABASE IF NOT EXISTS PARTSTATSDB2
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:PARTSTATSDB2
+POSTHOOK: query: CREATE DATABASE IF NOT EXISTS PARTSTATSDB2
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:PARTSTATSDB2
+PREHOOK: query: USE PARTSTATSDB2
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:partstatsdb2
+POSTHOOK: query: USE PARTSTATSDB2
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:partstatsdb2
+PREHOOK: query: CREATE TABLE IF NOT EXISTS testtable (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: PARTSTATSDB2@testtable
+PREHOOK: Output: database:partstatsdb2
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS testtable (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: PARTSTATSDB2@testtable
+POSTHOOK: Output: database:partstatsdb2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb2@testtable
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb2@testtable
+POSTHOOK: Output: partstatsdb2@testtable@part1=p11/part2=P12
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p21', Part2='P22')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb2@testtable
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE testtable PARTITION (part1='p21', Part2='P22')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb2@testtable
+POSTHOOK: Output: partstatsdb2@testtable@part1=p21/part2=P22
+PREHOOK: query: ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb2@testtable
+PREHOOK: Input: partstatsdb2@testtable@part1=p11/part2=P12
+PREHOOK: Input: partstatsdb2@testtable@part1=p21/part2=P22
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE testtable COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb2@testtable
+POSTHOOK: Input: partstatsdb2@testtable@part1=p11/part2=P12
+POSTHOOK: Input: partstatsdb2@testtable@part1=p21/part2=P22
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE testtable PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb2@testtable
+PREHOOK: Input: partstatsdb2@testtable@part1=p11/part2=P12
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE testtable PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb2@testtable
+POSTHOOK: Input: partstatsdb2@testtable@part1=p11/part2=P12
+#### A masked pattern was here ####
+PREHOOK: query: CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: PARTSTATSDB2@TestTable1
+PREHOOK: Output: database:partstatsdb2
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS TestTable1 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: PARTSTATSDB2@TestTable1
+POSTHOOK: Output: database:partstatsdb2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P11')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb2@testtable1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P11')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb2@testtable1
+POSTHOOK: Output: partstatsdb2@testtable1@part1=p11/part2=P11
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb2@testtable1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb2@testtable1
+POSTHOOK: Output: partstatsdb2@testtable1@part1=p11/part2=P12
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p21', Part2='P22')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb2@testtable1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p21', Part2='P22')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb2@testtable1
+POSTHOOK: Output: partstatsdb2@testtable1@part1=p21/part2=P22
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p31', Part2='P32')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb2@testtable1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TestTable1 PARTITION (part1='p31', Part2='P32')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb2@testtable1
+POSTHOOK: Output: partstatsdb2@testtable1@part1=p31/part2=P32
+PREHOOK: query: ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb2@testtable1
+PREHOOK: Input: partstatsdb2@testtable1@part1=p11/part2=P11
+PREHOOK: Input: partstatsdb2@testtable1@part1=p11/part2=P12
+PREHOOK: Input: partstatsdb2@testtable1@part1=p21/part2=P22
+PREHOOK: Input: partstatsdb2@testtable1@part1=p31/part2=P32
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE TestTable1 COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb2@testtable1
+POSTHOOK: Input: partstatsdb2@testtable1@part1=p11/part2=P11
+POSTHOOK: Input: partstatsdb2@testtable1@part1=p11/part2=P12
+POSTHOOK: Input: partstatsdb2@testtable1@part1=p21/part2=P22
+POSTHOOK: Input: partstatsdb2@testtable1@part1=p31/part2=P32
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE TestTable1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb2@testtable1
+PREHOOK: Input: partstatsdb2@testtable1@part1=p11/part2=P11
+PREHOOK: Input: partstatsdb2@testtable1@part1=p11/part2=P12
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE TestTable1 PARTITION (part1='p11') COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb2@testtable1
+POSTHOOK: Input: partstatsdb2@testtable1@part1=p11/part2=P11
+POSTHOOK: Input: partstatsdb2@testtable1@part1=p11/part2=P12
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE TestTable1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb2@testtable1
+PREHOOK: Input: partstatsdb2@testtable1@part1=p11/part2=P12
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE TestTable1 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb2@testtable1
+POSTHOOK: Input: partstatsdb2@testtable1@part1=p11/part2=P12
+#### A masked pattern was here ####
+PREHOOK: query: CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: PARTSTATSDB2@TESTTABLE2
+PREHOOK: Output: database:partstatsdb2
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS TESTTABLE2 (key STRING, value STRING) PARTITIONED BY (part1 STRING, Part2 STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: PARTSTATSDB2@TESTTABLE2
+POSTHOOK: Output: database:partstatsdb2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb2@testtable2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb2@testtable2
+POSTHOOK: Output: partstatsdb2@testtable2@part1=p11/part2=P12
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p21', Part2='P22')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: partstatsdb2@testtable2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE TESTTABLE2 PARTITION (part1='p21', Part2='P22')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: partstatsdb2@testtable2
+POSTHOOK: Output: partstatsdb2@testtable2@part1=p21/part2=P22
+PREHOOK: query: ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb2@testtable2
+PREHOOK: Input: partstatsdb2@testtable2@part1=p11/part2=P12
+PREHOOK: Input: partstatsdb2@testtable2@part1=p21/part2=P22
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE TESTTABLE2 COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb2@testtable2
+POSTHOOK: Input: partstatsdb2@testtable2@part1=p11/part2=P12
+POSTHOOK: Input: partstatsdb2@testtable2@part1=p21/part2=P22
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+PREHOOK: type: QUERY
+PREHOOK: Input: partstatsdb2@testtable2
+PREHOOK: Input: partstatsdb2@testtable2@part1=p11/part2=P12
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE TESTTABLE2 PARTITION (part1='p11', Part2='P12') COMPUTE STATISTICS FOR COLUMNS key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: partstatsdb2@testtable2
+POSTHOOK: Input: partstatsdb2@testtable2@part1=p11/part2=P12
+#### A masked pattern was here ####
+PREHOOK: query: ALTER TABLE PARTSTATSDB2.testtable DROP PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: partstatsdb2@testtable
+PREHOOK: Output: partstatsdb2@testtable@part1=p11/part2=P12
+POSTHOOK: query: ALTER TABLE PARTSTATSDB2.testtable DROP PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: partstatsdb2@testtable
+POSTHOOK: Output: partstatsdb2@testtable@part1=p11/part2=P12
+PREHOOK: query: ALTER TABLE PARTSTATSDB2.TestTable1 DROP PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: partstatsdb2@testtable1
+PREHOOK: Output: partstatsdb2@testtable1@part1=p11/part2=P12
+POSTHOOK: query: ALTER TABLE PARTSTATSDB2.TestTable1 DROP PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: partstatsdb2@testtable1
+POSTHOOK: Output: partstatsdb2@testtable1@part1=p11/part2=P12
+PREHOOK: query: ALTER TABLE PARTSTATSDB2.TESTTABLE2 DROP PARTITION (part1='p11', Part2='P12')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: partstatsdb2@testtable2
+PREHOOK: Output: partstatsdb2@testtable2@part1=p11/part2=P12
+POSTHOOK: query: ALTER TABLE PARTSTATSDB2.TESTTABLE2 DROP PARTITION (part1='p11', Part2='P12')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: partstatsdb2@testtable2
+POSTHOOK: Output: partstatsdb2@testtable2@part1=p11/part2=P12
+PREHOOK: query: DROP TABLE PARTSTATSDB2.testtable
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: partstatsdb2@testtable
+PREHOOK: Output: partstatsdb2@testtable
+POSTHOOK: query: DROP TABLE PARTSTATSDB2.testtable
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: partstatsdb2@testtable
+POSTHOOK: Output: partstatsdb2@testtable
+PREHOOK: query: DROP TABLE PARTSTATSDB2.TestTable1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: partstatsdb2@testtable1
+PREHOOK: Output: partstatsdb2@testtable1
+POSTHOOK: query: DROP TABLE PARTSTATSDB2.TestTable1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: partstatsdb2@testtable1
+POSTHOOK: Output: partstatsdb2@testtable1
+PREHOOK: query: DROP TABLE PARTSTATSDB2.TESTTABLE2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: partstatsdb2@testtable2
+PREHOOK: Output: partstatsdb2@testtable2
+POSTHOOK: query: DROP TABLE PARTSTATSDB2.TESTTABLE2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: partstatsdb2@testtable2
+POSTHOOK: Output: partstatsdb2@testtable2
+PREHOOK: query: DROP DATABASE PARTSTATSDB2
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:partstatsdb2
+PREHOOK: Output: database:partstatsdb2
+POSTHOOK: query: DROP DATABASE PARTSTATSDB2
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:partstatsdb2
+POSTHOOK: Output: database:partstatsdb2


[30/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out
new file mode 100644
index 0000000..475a9e3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin3.q.out
@@ -0,0 +1,830 @@
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+PREHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part_2
+                    numFiles 2
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 3062
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part_2
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part_2
+                  name: default.srcbucket_mapjoin_part_2
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part_2/ds=2008-04-08 [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col7
+                Position of Big Table: 1
+                Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 0
+                          numRows 0
+                          rawDataSize 0
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 0
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 0
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+564
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+564
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0
+PREHOOK: query: explain extended 
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended 
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part_2
+                    numFiles 2
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 3062
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part_2
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part_2
+                  name: default.srcbucket_mapjoin_part_2
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part_2/ds=2008-04-08 [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col7
+                Position of Big Table: 1
+                Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 1
+                          numRows 564
+                          rawDataSize 10503
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 11067
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 1
+                numRows 564
+                rawDataSize 10503
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 11067
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+564
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part_2 a join srcbucket_mapjoin_part b 
+on a.key=b.key and b.ds="2008-04-08" and a.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+564
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out
new file mode 100644
index 0000000..4815a1c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin4.q.out
@@ -0,0 +1,798 @@
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+PREHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: srcbucket_mapjoin
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin
+                    numFiles 2
+                    numRows 0
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 2750
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin
+                      numFiles 2
+                      numRows 0
+                      rawDataSize 0
+                      serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 2750
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin
+                  name: default.srcbucket_mapjoin
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: srcbucket_mapjoin
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin
+                    numFiles 2
+                    numRows 0
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 2750
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin
+                      numFiles 2
+                      numRows 0
+                      rawDataSize 0
+                      serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 2750
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin
+                  name: default.srcbucket_mapjoin
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col6
+                Position of Big Table: 0
+                Statistics: Num rows: 28 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 28 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 28 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 0
+                          numRows 0
+                          rawDataSize 0
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 0
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 0
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: srcbucket_mapjoin
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin
+                    numFiles 2
+                    numRows 0
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 2750
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin
+                      numFiles 2
+                      numRows 0
+                      rawDataSize 0
+                      serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 2750
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin
+                  name: default.srcbucket_mapjoin
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: srcbucket_mapjoin
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin
+                    numFiles 2
+                    numRows 0
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 2750
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin
+                      numFiles 2
+                      numRows 0
+                      rawDataSize 0
+                      serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 2750
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin
+                  name: default.srcbucket_mapjoin
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col6
+                Position of Big Table: 0
+                Statistics: Num rows: 28 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 28 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 28 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 1
+                          numRows 464
+                          rawDataSize 8519
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 8983
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 1
+                numRows 464
+                rawDataSize 8519
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 8983
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin a join srcbucket_mapjoin b
+on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0


[17/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
new file mode 100644
index 0000000..eb67a6e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
@@ -0,0 +1,1507 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table src_10 as select * from src limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_10
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table src_10 as select * from src limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_10
+POSTHOOK: Lineage: src_10.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_10.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table src_lv1 (key string, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_lv1
+POSTHOOK: query: create table src_lv1 (key string, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_lv1
+PREHOOK: query: create table src_lv2 (key string, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_lv2
+POSTHOOK: query: create table src_lv2 (key string, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_lv2
+PREHOOK: query: create table src_lv3 (key string, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_lv3
+POSTHOOK: query: create table src_lv3 (key string, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_lv3
+PREHOOK: query: -- 2LV
+-- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-FS[12]
+--             -SEL[3]-UDTF[4]-LVJ[5]
+--      -LVF[6]-SEL[7]-LVJ[10]-SEL[13]-FS[14]
+--             -SEL[8]-UDTF[9]-LVJ[10]
+explain
+from src_10
+insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C
+insert overwrite table src_lv2 select key, C lateral view explode(array(key+3, key+4)) A as C
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 2LV
+-- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-FS[12]
+--             -SEL[3]-UDTF[4]-LVJ[5]
+--      -LVF[6]-SEL[7]-LVJ[10]-SEL[13]-FS[14]
+--             -SEL[8]-UDTF[9]-LVJ[10]
+explain
+from src_10
+insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C
+insert overwrite table src_lv2 select key, C lateral view explode(array(key+3, key+4)) A as C
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src_10
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  Lateral View Forward
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Lateral View Join Operator
+                        outputColumnNames: _col0, _col5
+                        Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: string), _col5 (type: double)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                name: default.src_lv1
+                    Select Operator
+                      expressions: array((key + 1),(key + 2)) (type: array<double>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      UDTF Operator
+                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col0, _col5
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Select Operator
+                            expressions: _col0 (type: string), _col5 (type: double)
+                            outputColumnNames: _col0, _col1
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            File Output Operator
+                              compressed: false
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                              table:
+                                  input format: org.apache.hadoop.mapred.TextInputFormat
+                                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                  name: default.src_lv1
+                  Lateral View Forward
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Lateral View Join Operator
+                        outputColumnNames: _col0, _col5
+                        Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: string), _col5 (type: double)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                name: default.src_lv2
+                    Select Operator
+                      expressions: array((key + 3),(key + 4)) (type: array<double>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      UDTF Operator
+                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col0, _col5
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Select Operator
+                            expressions: _col0 (type: string), _col5 (type: double)
+                            outputColumnNames: _col0, _col1
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            File Output Operator
+                              compressed: false
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                              table:
+                                  input format: org.apache.hadoop.mapred.TextInputFormat
+                                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                  name: default.src_lv2
+            Execution mode: llap
+            LLAP IO: no inputs
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from src_10
+insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C
+insert overwrite table src_lv2 select key, C lateral view explode(array(key+3, key+4)) A as C
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_10
+PREHOOK: Output: default@src_lv1
+PREHOOK: Output: default@src_lv2
+POSTHOOK: query: from src_10
+insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C
+insert overwrite table src_lv2 select key, C lateral view explode(array(key+3, key+4)) A as C
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_10
+POSTHOOK: Output: default@src_lv1
+POSTHOOK: Output: default@src_lv2
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: select * from src_lv1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv1
+#### A masked pattern was here ####
+165	166.0
+165	167.0
+238	239.0
+238	240.0
+255	256.0
+255	257.0
+27	28.0
+27	29.0
+278	279.0
+278	280.0
+311	312.0
+311	313.0
+409	410.0
+409	411.0
+484	485.0
+484	486.0
+86	87.0
+86	88.0
+98	100.0
+98	99.0
+PREHOOK: query: select * from src_lv2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv2
+#### A masked pattern was here ####
+165	168.0
+165	169.0
+238	241.0
+238	242.0
+255	258.0
+255	259.0
+27	30.0
+27	31.0
+278	281.0
+278	282.0
+311	314.0
+311	315.0
+409	412.0
+409	413.0
+484	487.0
+484	488.0
+86	89.0
+86	90.0
+98	101.0
+98	102.0
+PREHOOK: query: -- 2(LV+GBY)
+-- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-GBY[12]-RS[13]-GBY[14]-SEL[15]-FS[16]
+--             -SEL[3]-UDTF[4]-LVJ[5]
+--      -LVF[6]-SEL[7]-LVJ[10]-SEL[17]-GBY[18]-RS[19]-GBY[20]-SEL[21]-FS[22]
+--             -SEL[8]-UDTF[9]-LVJ[10]
+explain
+from src_10
+insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, sum(C) lateral view explode(array(key+3, key+4)) A as C group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 2(LV+GBY)
+-- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-GBY[12]-RS[13]-GBY[14]-SEL[15]-FS[16]
+--             -SEL[3]-UDTF[4]-LVJ[5]
+--      -LVF[6]-SEL[7]-LVJ[10]-SEL[17]-GBY[18]-RS[19]-GBY[20]-SEL[21]-FS[22]
+--             -SEL[8]-UDTF[9]-LVJ[10]
+explain
+from src_10
+insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, sum(C) lateral view explode(array(key+3, key+4)) A as C group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src_10
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  Lateral View Forward
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Lateral View Join Operator
+                        outputColumnNames: _col0, _col5
+                        Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: sum(_col5)
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: string)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: string)
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: double)
+                    Select Operator
+                      expressions: array((key + 1),(key + 2)) (type: array<double>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      UDTF Operator
+                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col0, _col5
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Group By Operator
+                            aggregations: sum(_col5)
+                            keys: _col0 (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string)
+                              sort order: +
+                              Map-reduce partition columns: _col0 (type: string)
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                              value expressions: _col1 (type: double)
+                  Lateral View Forward
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Lateral View Join Operator
+                        outputColumnNames: _col0, _col5
+                        Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: sum(_col5)
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: string)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: string)
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: double)
+                    Select Operator
+                      expressions: array((key + 3),(key + 4)) (type: array<double>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      UDTF Operator
+                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col0, _col5
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Group By Operator
+                            aggregations: sum(_col5)
+                            keys: _col0 (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string)
+                              sort order: +
+                              Map-reduce partition columns: _col0 (type: string)
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                              value expressions: _col1 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.src_lv1
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.src_lv2
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from src_10
+insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, sum(C) lateral view explode(array(key+3, key+4)) A as C group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_10
+PREHOOK: Output: default@src_lv1
+PREHOOK: Output: default@src_lv2
+POSTHOOK: query: from src_10
+insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, sum(C) lateral view explode(array(key+3, key+4)) A as C group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_10
+POSTHOOK: Output: default@src_lv1
+POSTHOOK: Output: default@src_lv2
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: select * from src_lv1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv1
+#### A masked pattern was here ####
+165	333.0
+238	479.0
+255	513.0
+27	57.0
+278	559.0
+311	625.0
+409	821.0
+484	971.0
+86	175.0
+98	199.0
+PREHOOK: query: select * from src_lv2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv2
+#### A masked pattern was here ####
+165	337.0
+238	483.0
+255	517.0
+27	61.0
+278	563.0
+311	629.0
+409	825.0
+484	975.0
+86	179.0
+98	203.0
+PREHOOK: query: -- (LV+GBY) + RS:2GBY
+-- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[6]-GBY[7]-RS[8]-GBY[9]-SEL[10]-FS[11]
+--             -SEL[3]-UDTF[4]-LVJ[5]
+--      -FIL[12]-SEL[13]-RS[14]-FOR[15]-FIL[16]-GBY[17]-SEL[18]-FS[19]
+--                                     -FIL[20]-GBY[21]-SEL[22]-FS[23]
+explain
+from src_10
+insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, count(value) where key > 200 group by key
+insert overwrite table src_lv3 select key, count(value) where key < 200 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- (LV+GBY) + RS:2GBY
+-- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[6]-GBY[7]-RS[8]-GBY[9]-SEL[10]-FS[11]
+--             -SEL[3]-UDTF[4]-LVJ[5]
+--      -FIL[12]-SEL[13]-RS[14]-FOR[15]-FIL[16]-GBY[17]-SEL[18]-FS[19]
+--                                     -FIL[20]-GBY[21]-SEL[22]-FS[23]
+explain
+from src_10
+insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, count(value) where key > 200 group by key
+insert overwrite table src_lv3 select key, count(value) where key < 200 group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-4 depends on stages: Stage-3
+  Stage-0 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-4
+  Stage-6 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-4
+  Stage-7 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-3
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src_10
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  Lateral View Forward
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Lateral View Join Operator
+                        outputColumnNames: _col0, _col5
+                        Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: sum(_col5)
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: string)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: string)
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: double)
+                    Select Operator
+                      expressions: array((key + 1),(key + 2)) (type: array<double>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      UDTF Operator
+                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col0, _col5
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Group By Operator
+                            aggregations: sum(_col5)
+                            keys: _col0 (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string)
+                              sort order: +
+                              Map-reduce partition columns: _col0 (type: string)
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                              value expressions: _col1 (type: double)
+                  Filter Operator
+                    predicate: ((key < 200) or (key > 200)) (type: boolean)
+                    Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.src_lv1
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Forward
+                Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (KEY._col0 > 200) (type: boolean)
+                  Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    aggregations: count(VALUE._col0)
+                    keys: KEY._col0 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_lv2
+                Filter Operator
+                  predicate: (KEY._col0 < 200) (type: boolean)
+                  Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    aggregations: count(VALUE._col0)
+                    keys: KEY._col0 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_lv3
+
+  Stage: Stage-4
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv1
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv2
+
+  Stage: Stage-6
+    Stats-Aggr Operator
+
+  Stage: Stage-2
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv3
+
+  Stage: Stage-7
+    Stats-Aggr Operator
+
+PREHOOK: query: from src_10
+insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, count(value) where key > 200 group by key
+insert overwrite table src_lv3 select key, count(value) where key < 200 group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_10
+PREHOOK: Output: default@src_lv1
+PREHOOK: Output: default@src_lv2
+PREHOOK: Output: default@src_lv3
+POSTHOOK: query: from src_10
+insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, count(value) where key > 200 group by key
+insert overwrite table src_lv3 select key, count(value) where key < 200 group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_10
+POSTHOOK: Output: default@src_lv1
+POSTHOOK: Output: default@src_lv2
+POSTHOOK: Output: default@src_lv3
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv3.value EXPRESSION [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from src_lv1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv1
+#### A masked pattern was here ####
+165	333.0
+238	479.0
+255	513.0
+27	57.0
+278	559.0
+311	625.0
+409	821.0
+484	971.0
+86	175.0
+98	199.0
+PREHOOK: query: select * from src_lv2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv2
+#### A masked pattern was here ####
+238	1
+255	1
+278	1
+311	1
+409	1
+484	1
+PREHOOK: query: select * from src_lv3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv3
+#### A masked pattern was here ####
+165	1
+27	1
+86	1
+98	1
+PREHOOK: query: -- todo: shared distinct columns (should work with hive.optimize.multigroupby.common.distincts)
+-- 2(LV+GBY) + RS:2GBY
+-- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-GBY[12]-RS[13]-GBY[14]-SEL[15]-FS[16]
+--             -SEL[3]-UDTF[4]-LVJ[5]
+--      -LVF[6]-SEL[7]-LVJ[10]-SEL[17]-GBY[18]-RS[19]-GBY[20]-SEL[21]-FS[22]
+--             -SEL[8]-UDTF[9]-LVJ[10]
+--      -SEL[23]-GBY[24]-RS[25]-GBY[26]-SEL[27]-FS[28]
+explain
+from src_10
+insert overwrite table src_lv1 select C, sum(distinct key) lateral view explode(array(key+1, key+2)) A as C group by C
+insert overwrite table src_lv2 select C, sum(distinct key) lateral view explode(array(key+3, key+4)) A as C group by C
+insert overwrite table src_lv3 select value, sum(distinct key) group by value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- todo: shared distinct columns (should work with hive.optimize.multigroupby.common.distincts)
+-- 2(LV+GBY) + RS:2GBY
+-- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-GBY[12]-RS[13]-GBY[14]-SEL[15]-FS[16]
+--             -SEL[3]-UDTF[4]-LVJ[5]
+--      -LVF[6]-SEL[7]-LVJ[10]-SEL[17]-GBY[18]-RS[19]-GBY[20]-SEL[21]-FS[22]
+--             -SEL[8]-UDTF[9]-LVJ[10]
+--      -SEL[23]-GBY[24]-RS[25]-GBY[26]-SEL[27]-FS[28]
+explain
+from src_10
+insert overwrite table src_lv1 select C, sum(distinct key) lateral view explode(array(key+1, key+2)) A as C group by C
+insert overwrite table src_lv2 select C, sum(distinct key) lateral view explode(array(key+3, key+4)) A as C group by C
+insert overwrite table src_lv3 select value, sum(distinct key) group by value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-4 depends on stages: Stage-3
+  Stage-0 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-4
+  Stage-6 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-4
+  Stage-7 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-3
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (SIMPLE_EDGE)
+        Reducer 4 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src_10
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  Lateral View Forward
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Lateral View Join Operator
+                        outputColumnNames: _col0, _col5
+                        Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col5 (type: double), _col0 (type: string)
+                          outputColumnNames: _col5, _col0
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Group By Operator
+                            aggregations: sum(DISTINCT _col0)
+                            keys: _col5 (type: double), _col0 (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: double), _col1 (type: string)
+                              sort order: ++
+                              Map-reduce partition columns: _col0 (type: double)
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: array((key + 1),(key + 2)) (type: array<double>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      UDTF Operator
+                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col0, _col5
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Select Operator
+                            expressions: _col5 (type: double), _col0 (type: string)
+                            outputColumnNames: _col5, _col0
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            Group By Operator
+                              aggregations: sum(DISTINCT _col0)
+                              keys: _col5 (type: double), _col0 (type: string)
+                              mode: hash
+                              outputColumnNames: _col0, _col1, _col2
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                              Reduce Output Operator
+                                key expressions: _col0 (type: double), _col1 (type: string)
+                                sort order: ++
+                                Map-reduce partition columns: _col0 (type: double)
+                                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Lateral View Forward
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Lateral View Join Operator
+                        outputColumnNames: _col0, _col5
+                        Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col5 (type: double), _col0 (type: string)
+                          outputColumnNames: _col5, _col0
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Group By Operator
+                            aggregations: sum(DISTINCT _col0)
+                            keys: _col5 (type: double), _col0 (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: double), _col1 (type: string)
+                              sort order: ++
+                              Map-reduce partition columns: _col0 (type: double)
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: array((key + 3),(key + 4)) (type: array<double>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      UDTF Operator
+                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col0, _col5
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Select Operator
+                            expressions: _col5 (type: double), _col0 (type: string)
+                            outputColumnNames: _col5, _col0
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            Group By Operator
+                              aggregations: sum(DISTINCT _col0)
+                              keys: _col5 (type: double), _col0 (type: string)
+                              mode: hash
+                              outputColumnNames: _col0, _col1, _col2
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                              Reduce Output Operator
+                                key expressions: _col0 (type: double), _col1 (type: string)
+                                sort order: ++
+                                Map-reduce partition columns: _col0 (type: double)
+                                Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: value (type: string), key (type: string)
+                    outputColumnNames: value, key
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: sum(DISTINCT key)
+                      keys: value (type: string), key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(DISTINCT KEY._col1:0._col0)
+                keys: KEY._col0 (type: double)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.src_lv1
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(DISTINCT KEY._col1:0._col0)
+                keys: KEY._col0 (type: double)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.src_lv2
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(DISTINCT KEY._col1:0._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.src_lv3
+
+  Stage: Stage-4
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv1
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv2
+
+  Stage: Stage-6
+    Stats-Aggr Operator
+
+  Stage: Stage-2
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv3
+
+  Stage: Stage-7
+    Stats-Aggr Operator
+
+PREHOOK: query: from src_10
+insert overwrite table src_lv1 select C, sum(distinct key) lateral view explode(array(key+1, key+2)) A as C group by C
+insert overwrite table src_lv2 select C, sum(distinct key) lateral view explode(array(key+3, key+4)) A as C group by C
+insert overwrite table src_lv3 select value, sum(distinct key) group by value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_10
+PREHOOK: Output: default@src_lv1
+PREHOOK: Output: default@src_lv2
+PREHOOK: Output: default@src_lv3
+POSTHOOK: query: from src_10
+insert overwrite table src_lv1 select C, sum(distinct key) lateral view explode(array(key+1, key+2)) A as C group by C
+insert overwrite table src_lv2 select C, sum(distinct key) lateral view explode(array(key+3, key+4)) A as C group by C
+insert overwrite table src_lv3 select value, sum(distinct key) group by value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_10
+POSTHOOK: Output: default@src_lv1
+POSTHOOK: Output: default@src_lv2
+POSTHOOK: Output: default@src_lv3
+POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv3.value EXPRESSION [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: select * from src_lv1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv1
+#### A masked pattern was here ####
+100.0	98.0
+166.0	165.0
+167.0	165.0
+239.0	238.0
+240.0	238.0
+256.0	255.0
+257.0	255.0
+279.0	278.0
+28.0	27.0
+280.0	278.0
+29.0	27.0
+312.0	311.0
+313.0	311.0
+410.0	409.0
+411.0	409.0
+485.0	484.0
+486.0	484.0
+87.0	86.0
+88.0	86.0
+99.0	98.0
+PREHOOK: query: select * from src_lv2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv2
+#### A masked pattern was here ####
+101.0	98.0
+102.0	98.0
+168.0	165.0
+169.0	165.0
+241.0	238.0
+242.0	238.0
+258.0	255.0
+259.0	255.0
+281.0	278.0
+282.0	278.0
+30.0	27.0
+31.0	27.0
+314.0	311.0
+315.0	311.0
+412.0	409.0
+413.0	409.0
+487.0	484.0
+488.0	484.0
+89.0	86.0
+90.0	86.0
+PREHOOK: query: select * from src_lv3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv3
+#### A masked pattern was here ####
+val_165	165.0
+val_238	238.0
+val_255	255.0
+val_27	27.0
+val_278	278.0
+val_311	311.0
+val_409	409.0
+val_484	484.0
+val_86	86.0
+val_98	98.0
+PREHOOK: query: create table src_lv4 (key string, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_lv4
+POSTHOOK: query: create table src_lv4 (key string, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_lv4
+PREHOOK: query: -- Common distincts optimization works across non-lateral view queries, but not across lateral view multi inserts
+explain
+from src_10
+insert overwrite table src_lv1 select key, sum(distinct C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, sum(distinct C) lateral view explode(array(key+3, key+4)) A as C group by key
+insert overwrite table src_lv3 select value, sum(distinct key) where key > 200 group by value
+insert overwrite table src_lv4 select value, sum(distinct key) where key < 200 group by value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Common distincts optimization works across non-lateral view queries, but not across lateral view multi inserts
+explain
+from src_10
+insert overwrite table src_lv1 select key, sum(distinct C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, sum(distinct C) lateral view explode(array(key+3, key+4)) A as C group by key
+insert overwrite table src_lv3 select value, sum(distinct key) where key > 200 group by value
+insert overwrite table src_lv4 select value, sum(distinct key) where key < 200 group by value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-5 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-5
+  Stage-6 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-5
+  Stage-7 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-5
+  Stage-8 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-5
+  Stage-9 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (SIMPLE_EDGE)
+        Reducer 4 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src_10
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  Lateral View Forward
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Lateral View Join Operator
+                        outputColumnNames: _col0, _col5
+                        Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: sum(DISTINCT _col5)
+                          keys: _col0 (type: string), _col5 (type: double)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: string), _col1 (type: double)
+                            sort order: ++
+                            Map-reduce partition columns: _col0 (type: string)
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: array((key + 1),(key + 2)) (type: array<double>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      UDTF Operator
+                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col0, _col5
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Group By Operator
+                            aggregations: sum(DISTINCT _col5)
+                            keys: _col0 (type: string), _col5 (type: double)
+                            mode: hash
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string), _col1 (type: double)
+                              sort order: ++
+                              Map-reduce partition columns: _col0 (type: string)
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Lateral View Forward
+                    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: key
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Lateral View Join Operator
+                        outputColumnNames: _col0, _col5
+                        Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: sum(DISTINCT _col5)
+                          keys: _col0 (type: string), _col5 (type: double)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: string), _col1 (type: double)
+                            sort order: ++
+                            Map-reduce partition columns: _col0 (type: string)
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: array((key + 3),(key + 4)) (type: array<double>)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      UDTF Operator
+                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                        function name: explode
+                        Lateral View Join Operator
+                          outputColumnNames: _col0, _col5
+                          Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                          Group By Operator
+                            aggregations: sum(DISTINCT _col5)
+                            keys: _col0 (type: string), _col5 (type: double)
+                            mode: hash
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string), _col1 (type: double)
+                              sort order: ++
+                              Map-reduce partition columns: _col0 (type: string)
+                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 200) or (key > 200)) (type: boolean)
+                    Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: value (type: string), key (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: value (type: string)
+                      Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(DISTINCT KEY._col1:0._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.src_lv1
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(DISTINCT KEY._col1:0._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.src_lv2
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Forward
+                Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (KEY._col1:0._col0 > 200) (type: boolean)
+                  Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    aggregations: sum(DISTINCT KEY._col1:0._col0)
+                    keys: KEY._col0 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_lv3
+                Filter Operator
+                  predicate: (KEY._col1:0._col0 < 200) (type: boolean)
+                  Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    aggregations: sum(DISTINCT KEY._col1:0._col0)
+                    keys: KEY._col0 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_lv4
+
+  Stage: Stage-5
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv1
+
+  Stage: Stage-6
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv2
+
+  Stage: Stage-7
+    Stats-Aggr Operator
+
+  Stage: Stage-2
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv3
+
+  Stage: Stage-8
+    Stats-Aggr Operator
+
+  Stage: Stage-3
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_lv4
+
+  Stage: Stage-9
+    Stats-Aggr Operator
+
+PREHOOK: query: from src_10
+insert overwrite table src_lv1 select key, sum(distinct C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, sum(distinct C) lateral view explode(array(key+3, key+4)) A as C group by key
+insert overwrite table src_lv3 select value, sum(distinct key) where key > 200 group by value
+insert overwrite table src_lv4 select value, sum(distinct key) where key < 200 group by value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_10
+PREHOOK: Output: default@src_lv1
+PREHOOK: Output: default@src_lv2
+PREHOOK: Output: default@src_lv3
+PREHOOK: Output: default@src_lv4
+POSTHOOK: query: from src_10
+insert overwrite table src_lv1 select key, sum(distinct C) lateral view explode(array(key+1, key+2)) A as C group by key
+insert overwrite table src_lv2 select key, sum(distinct C) lateral view explode(array(key+3, key+4)) A as C group by key
+insert overwrite table src_lv3 select value, sum(distinct key) where key > 200 group by value
+insert overwrite table src_lv4 select value, sum(distinct key) where key < 200 group by value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_10
+POSTHOOK: Output: default@src_lv1
+POSTHOOK: Output: default@src_lv2
+POSTHOOK: Output: default@src_lv3
+POSTHOOK: Output: default@src_lv4
+POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv3.value EXPRESSION [(src_10)src_10.null, ]
+POSTHOOK: Lineage: src_lv4.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: src_lv4.value EXPRESSION [(src_10)src_10.null, ]
+PREHOOK: query: select * from src_lv1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv1
+#### A masked pattern was here ####
+165	333.0
+238	479.0
+255	513.0
+27	57.0
+278	559.0
+311	625.0
+409	821.0
+484	971.0
+86	175.0
+98	199.0
+PREHOOK: query: select * from src_lv2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv2
+#### A masked pattern was here ####
+165	337.0
+238	483.0
+255	517.0
+27	61.0
+278	563.0
+311	629.0
+409	825.0
+484	975.0
+86	179.0
+98	203.0
+PREHOOK: query: select * from src_lv3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv3
+#### A masked pattern was here ####
+val_238	238.0
+val_255	255.0
+val_278	278.0
+val_311	311.0
+val_409	409.0
+val_484	484.0
+PREHOOK: query: select * from src_lv4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_lv4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_lv4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_lv4
+#### A masked pattern was here ####
+val_165	165.0
+val_27	27.0
+val_86	86.0
+val_98	98.0

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/offset_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/offset_limit.q.out b/ql/src/test/results/clientpositive/llap/offset_limit.q.out
new file mode 100644
index 0000000..adfeb05
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/offset_limit.q.out
@@ -0,0 +1,277 @@
+PREHOOK: query: EXPLAIN
+SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 10,10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 10,10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), substr(value, 5) (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: sum(_col1)
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col1 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col1 (type: double)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: double)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 10
+                  Offset of rows: 10
+                  Statistics: Num rows: 10 Data size: 950 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 950 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 10,10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 10,10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+116	116.0
+118	236.0
+119	357.0
+12	24.0
+120	240.0
+125	250.0
+126	126.0
+128	384.0
+129	258.0
+131	131.0
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 0,10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 0,10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0.0
+10	10.0
+100	200.0
+103	206.0
+104	208.0
+105	105.0
+11	11.0
+111	111.0
+113	226.0
+114	114.0
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 1,10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 1,10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+10	10.0
+100	200.0
+103	206.0
+104	208.0
+105	105.0
+11	11.0
+111	111.0
+113	226.0
+114	114.0
+116	116.0
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 300,100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 300,100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+86	86.0
+87	87.0
+9	9.0
+90	270.0
+92	92.0
+95	190.0
+96	96.0
+97	194.0
+98	196.0
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 100 OFFSET 300
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 100 OFFSET 300
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+86	86.0
+87	87.0
+9	9.0
+90	270.0
+92	92.0
+95	190.0
+96	96.0
+97	194.0
+98	196.0
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 10,10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 10,10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+116	116.0
+118	236.0
+119	357.0
+12	24.0
+120	240.0
+125	250.0
+126	126.0
+128	384.0
+129	258.0
+131	131.0
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 10,10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 10,10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+116	116.0
+118	236.0
+119	357.0
+12	24.0
+120	240.0
+125	250.0
+126	126.0
+128	384.0
+129	258.0
+131	131.0
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 0,10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 0,10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0.0
+10	10.0
+100	200.0
+103	206.0
+104	208.0
+105	105.0
+11	11.0
+111	111.0
+113	226.0
+114	114.0
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 1,10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 1,10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+10	10.0
+100	200.0
+103	206.0
+104	208.0
+105	105.0
+11	11.0
+111	111.0
+113	226.0
+114	114.0
+116	116.0
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 300,100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 300,100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+86	86.0
+87	87.0
+9	9.0
+90	270.0
+92	92.0
+95	190.0
+96	96.0
+97	194.0
+98	196.0
+PREHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 100 OFFSET 300
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 100 OFFSET 300
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+86	86.0
+87	87.0
+9	9.0
+90	270.0
+92	92.0
+95	190.0
+96	96.0
+97	194.0
+98	196.0


[15/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/orc_create.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_create.q.out b/ql/src/test/results/clientpositive/llap/orc_create.q.out
new file mode 100644
index 0000000..40d127c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_create.q.out
@@ -0,0 +1,782 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE orc_create
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE orc_create
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orc_create_complex
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_create_complex
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orc_create_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_create_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orc_create_people_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_create_people_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orc_create_people
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_create_people
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE if exists orc_create_cprl
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE if exists orc_create_cprl
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orc_create_staging (
+  str STRING,
+  mp  MAP<STRING,STRING>,
+  lst ARRAY<STRING>,
+  strct STRUCT<A:STRING,B:STRING>
+) ROW FORMAT DELIMITED
+    FIELDS TERMINATED BY '|'
+    COLLECTION ITEMS TERMINATED BY ','
+    MAP KEYS TERMINATED BY ':'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_staging
+POSTHOOK: query: CREATE TABLE orc_create_staging (
+  str STRING,
+  mp  MAP<STRING,STRING>,
+  lst ARRAY<STRING>,
+  strct STRUCT<A:STRING,B:STRING>
+) ROW FORMAT DELIMITED
+    FIELDS TERMINATED BY '|'
+    COLLECTION ITEMS TERMINATED BY ','
+    MAP KEYS TERMINATED BY ':'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_staging
+PREHOOK: query: DESCRIBE FORMATTED orc_create_staging
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_staging
+POSTHOOK: query: DESCRIBE FORMATTED orc_create_staging
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_staging
+# col_name            	data_type           	comment             
+	 	 
+str                 	string              	                    
+mp                  	map<string,string>  	                    
+lst                 	array<string>       	                    
+strct               	struct<A:string,B:string>	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	colelction.delim    	,                   
+	field.delim         	|                   
+	mapkey.delim        	:                   
+	serialization.format	|                   
+PREHOOK: query: CREATE TABLE orc_create (key INT, value STRING)
+   PARTITIONED BY (ds string)
+   STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create
+POSTHOOK: query: CREATE TABLE orc_create (key INT, value STRING)
+   PARTITIONED BY (ds string)
+   STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create
+PREHOOK: query: DESCRIBE FORMATTED orc_create
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create
+POSTHOOK: query: DESCRIBE FORMATTED orc_create
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: DROP TABLE orc_create
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create
+PREHOOK: Output: default@orc_create
+POSTHOOK: query: DROP TABLE orc_create
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create
+POSTHOOK: Output: default@orc_create
+PREHOOK: query: CREATE TABLE orc_create (key INT, value STRING)
+   PARTITIONED BY (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create
+POSTHOOK: query: CREATE TABLE orc_create (key INT, value STRING)
+   PARTITIONED BY (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create
+PREHOOK: query: DESCRIBE FORMATTED orc_create
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create
+POSTHOOK: query: DESCRIBE FORMATTED orc_create
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: ALTER TABLE orc_create SET FILEFORMAT ORC
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@orc_create
+PREHOOK: Output: default@orc_create
+POSTHOOK: query: ALTER TABLE orc_create SET FILEFORMAT ORC
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@orc_create
+POSTHOOK: Output: default@orc_create
+PREHOOK: query: DESCRIBE FORMATTED orc_create
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create
+POSTHOOK: query: DESCRIBE FORMATTED orc_create
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: DROP TABLE orc_create
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create
+PREHOOK: Output: default@orc_create
+POSTHOOK: query: DROP TABLE orc_create
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create
+POSTHOOK: Output: default@orc_create
+PREHOOK: query: CREATE TABLE orc_create (key INT, value STRING)
+   PARTITIONED BY (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create
+POSTHOOK: query: CREATE TABLE orc_create (key INT, value STRING)
+   PARTITIONED BY (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create
+PREHOOK: query: DESCRIBE FORMATTED orc_create
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create
+POSTHOOK: query: DESCRIBE FORMATTED orc_create
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: CREATE TABLE orc_create_complex (
+  str STRING,
+  mp  MAP<STRING,STRING>,
+  lst ARRAY<STRING>,
+  strct STRUCT<A:STRING,B:STRING>
+) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_complex
+POSTHOOK: query: CREATE TABLE orc_create_complex (
+  str STRING,
+  mp  MAP<STRING,STRING>,
+  lst ARRAY<STRING>,
+  strct STRUCT<A:STRING,B:STRING>
+) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_complex
+PREHOOK: query: DESCRIBE FORMATTED orc_create_complex
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_complex
+POSTHOOK: query: DESCRIBE FORMATTED orc_create_complex
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_complex
+# col_name            	data_type           	comment             
+	 	 
+str                 	string              	                    
+mp                  	map<string,string>  	                    
+lst                 	array<string>       	                    
+strct               	struct<A:string,B:string>	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_create_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_create_staging
+PREHOOK: query: SELECT * from orc_create_staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_staging
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from orc_create_staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_staging
+#### A masked pattern was here ####
+line1	{"key11":"value11","key12":"value12","key13":"value13"}	["a","b","c"]	{"a":"one","b":"two"}
+line2	{"key21":"value21","key22":"value22","key23":"value23"}	["d","e","f"]	{"a":"three","b":"four"}
+line3	{"key31":"value31","key32":"value32","key33":"value33"}	["g","h","i"]	{"a":"five","b":"six"}
+PREHOOK: query: INSERT OVERWRITE TABLE orc_create_complex SELECT * FROM orc_create_staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_staging
+PREHOOK: Output: default@orc_create_complex
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_complex SELECT * FROM orc_create_staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_staging
+POSTHOOK: Output: default@orc_create_complex
+POSTHOOK: Lineage: orc_create_complex.lst SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:lst, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex.mp SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:mp, type:map<string,string>, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex.str SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:str, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_complex.strct SIMPLE [(orc_create_staging)orc_create_staging.FieldSchema(name:strct, type:struct<A:string,B:string>, comment:null), ]
+PREHOOK: query: SELECT * from orc_create_complex
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_complex
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from orc_create_complex
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_complex
+#### A masked pattern was here ####
+line1	{"key13":"value13","key11":"value11","key12":"value12"}	["a","b","c"]	{"A":"one","B":"two"}
+line2	{"key21":"value21","key22":"value22","key23":"value23"}	["d","e","f"]	{"A":"three","B":"four"}
+line3	{"key31":"value31","key32":"value32","key33":"value33"}	["g","h","i"]	{"A":"five","B":"six"}
+PREHOOK: query: SELECT str from orc_create_complex
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_complex
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT str from orc_create_complex
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_complex
+#### A masked pattern was here ####
+line1
+line2
+line3
+PREHOOK: query: SELECT mp from orc_create_complex
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_complex
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT mp from orc_create_complex
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_complex
+#### A masked pattern was here ####
+{"key13":"value13","key11":"value11","key12":"value12"}
+{"key21":"value21","key22":"value22","key23":"value23"}
+{"key31":"value31","key32":"value32","key33":"value33"}
+PREHOOK: query: SELECT lst from orc_create_complex
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_complex
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT lst from orc_create_complex
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_complex
+#### A masked pattern was here ####
+["a","b","c"]
+["d","e","f"]
+["g","h","i"]
+PREHOOK: query: SELECT strct from orc_create_complex
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_complex
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT strct from orc_create_complex
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_complex
+#### A masked pattern was here ####
+{"A":"five","B":"six"}
+{"A":"one","B":"two"}
+{"A":"three","B":"four"}
+PREHOOK: query: CREATE TABLE orc_create_people_staging (
+  id int,
+  first_name string,
+  last_name string,
+  address string,
+  salary decimal,
+  start_date timestamp,
+  state string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_people_staging
+POSTHOOK: query: CREATE TABLE orc_create_people_staging (
+  id int,
+  first_name string,
+  last_name string,
+  address string,
+  salary decimal,
+  start_date timestamp,
+  state string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_people_staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/orc_create_people.txt'
+  OVERWRITE INTO TABLE orc_create_people_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_create_people_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/orc_create_people.txt'
+  OVERWRITE INTO TABLE orc_create_people_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_create_people_staging
+PREHOOK: query: CREATE TABLE orc_create_people (
+  id int,
+  first_name string,
+  last_name string,
+  address string,
+  salary decimal,
+  start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: CREATE TABLE orc_create_people (
+  id int,
+  first_name string,
+  last_name string,
+  address string,
+  salary decimal,
+  start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+  SELECT * FROM orc_create_people_staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people_staging
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+  SELECT * FROM orc_create_people_staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people_staging
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+PREHOOK: query: -- test predicate push down with partition pruning
+SELECT COUNT(*) FROM orc_create_people where id < 10 and state = 'Ca'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+#### A masked pattern was here ####
+POSTHOOK: query: -- test predicate push down with partition pruning
+SELECT COUNT(*) FROM orc_create_people where id < 10 and state = 'Ca'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+#### A masked pattern was here ####
+5
+PREHOOK: query: -- test predicate push down
+SELECT COUNT(*) FROM orc_create_people where id = 50
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: -- test predicate push down
+SELECT COUNT(*) FROM orc_create_people where id = 50
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+1
+PREHOOK: query: SELECT COUNT(*) FROM orc_create_people where id between 10 and 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM orc_create_people where id between 10 and 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+11
+PREHOOK: query: SELECT COUNT(*) FROM orc_create_people where id > 10 and id < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM orc_create_people where id > 10 and id < 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+89
+PREHOOK: query: SELECT COUNT(*) FROM orc_create_people where (id + 1) = 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM orc_create_people where (id + 1) = 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+1
+PREHOOK: query: SELECT COUNT(*) FROM orc_create_people where (id + 10) < 200
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM orc_create_people where (id + 10) < 200
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+100
+PREHOOK: query: SELECT COUNT(*) FROM orc_create_people where id < 30  or first_name = "Rafael"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM orc_create_people where id < 30  or first_name = "Rafael"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+30
+PREHOOK: query: SELECT COUNT(*) FROM orc_create_people 
+   where length(substr(first_name, 1, 2)) <= 2 and last_name like '%'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM orc_create_people 
+   where length(substr(first_name, 1, 2)) <= 2 and last_name like '%'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+100
+PREHOOK: query: SELECT COUNT(*) FROM orc_create_people where salary = 200.00
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM orc_create_people where salary = 200.00
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+11
+PREHOOK: query: SELECT COUNT(*) FROM orc_create_people WHERE start_date IS NULL
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM orc_create_people WHERE start_date IS NULL
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+0
+PREHOOK: query: SELECT COUNT(*) FROM orc_create_people WHERE YEAR(start_date) = 2014
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM orc_create_people WHERE YEAR(start_date) = 2014
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- test predicate push down with partition pruning
+SELECT COUNT(*) FROM orc_create_people where salary = 200.00 and state = 'Ca'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+#### A masked pattern was here ####
+POSTHOOK: query: -- test predicate push down with partition pruning
+SELECT COUNT(*) FROM orc_create_people where salary = 200.00 and state = 'Ca'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+#### A masked pattern was here ####
+5
+PREHOOK: query: -- test predicate push down with no column projection
+SELECT id, first_name, last_name, address
+  FROM orc_create_people WHERE id > 90
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Input: default@orc_create_people@state=Ca
+PREHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+POSTHOOK: query: -- test predicate push down with no column projection
+SELECT id, first_name, last_name, address
+  FROM orc_create_people WHERE id > 90
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Input: default@orc_create_people@state=Ca
+POSTHOOK: Input: default@orc_create_people@state=Or
+#### A masked pattern was here ####
+100	Wang	Mitchell	4023 Lacinia. Ave
+91	Genevieve	Wilkins	908 Turpis. Street
+92	Thane	Oneil	6766 Lectus St.
+93	Mariko	Cline	P.O. Box 329, 5375 Ac St.
+94	Lael	Mclean	500-7010 Sit St.
+95	Winifred	Hopper	Ap #140-8982 Velit Avenue
+96	Rafael	England	P.O. Box 405, 7857 Eget Av.
+97	Dana	Carter	814-601 Purus. Av.
+98	Juliet	Battle	Ap #535-1965 Cursus St.
+99	Wynter	Vincent	626-8492 Mollis Avenue
+PREHOOK: query: -- test create with lower case compression method.
+CREATE TABLE orc_create_cprl (id int)
+PARTITIONED BY (cdate date)
+STORED AS ORC
+TBLPROPERTIES (
+'orc.compress'='snappy')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_cprl
+POSTHOOK: query: -- test create with lower case compression method.
+CREATE TABLE orc_create_cprl (id int)
+PARTITIONED BY (cdate date)
+STORED AS ORC
+TBLPROPERTIES (
+'orc.compress'='snappy')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_cprl
+PREHOOK: query: INSERT OVERWRITE table orc_create_cprl PARTITION (cdate = '2015-02-03')
+SELECT 1 from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orc_create_cprl@cdate=2015-02-03
+POSTHOOK: query: INSERT OVERWRITE table orc_create_cprl PARTITION (cdate = '2015-02-03')
+SELECT 1 from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orc_create_cprl@cdate=2015-02-03
+POSTHOOK: Lineage: orc_create_cprl PARTITION(cdate=2015-02-03).id SIMPLE []
+PREHOOK: query: SELECT * from orc_create_cprl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_cprl
+PREHOOK: Input: default@orc_create_cprl@cdate=2015-02-03
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * from orc_create_cprl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_cprl
+POSTHOOK: Input: default@orc_create_cprl@cdate=2015-02-03
+#### A masked pattern was here ####
+1	2015-02-03
+PREHOOK: query: DROP TABLE orc_create
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create
+PREHOOK: Output: default@orc_create
+POSTHOOK: query: DROP TABLE orc_create
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create
+POSTHOOK: Output: default@orc_create
+PREHOOK: query: DROP TABLE orc_create_complex
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_complex
+PREHOOK: Output: default@orc_create_complex
+POSTHOOK: query: DROP TABLE orc_create_complex
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_complex
+POSTHOOK: Output: default@orc_create_complex
+PREHOOK: query: DROP TABLE orc_create_staging
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_staging
+PREHOOK: Output: default@orc_create_staging
+POSTHOOK: query: DROP TABLE orc_create_staging
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_staging
+POSTHOOK: Output: default@orc_create_staging
+PREHOOK: query: DROP TABLE orc_create_people_staging
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_people_staging
+PREHOOK: Output: default@orc_create_people_staging
+POSTHOOK: query: DROP TABLE orc_create_people_staging
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_people_staging
+POSTHOOK: Output: default@orc_create_people_staging
+PREHOOK: query: DROP TABLE orc_create_people
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: DROP TABLE orc_create_people
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: DROP TABLE orc_create_cprl
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_cprl
+PREHOOK: Output: default@orc_create_cprl
+POSTHOOK: query: DROP TABLE orc_create_cprl
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_cprl
+POSTHOOK: Output: default@orc_create_cprl

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/orc_ppd_varchar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_ppd_varchar.q.out b/ql/src/test/results/clientpositive/llap/orc_ppd_varchar.q.out
new file mode 100644
index 0000000..3c43b2f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_ppd_varchar.q.out
@@ -0,0 +1,220 @@
+PREHOOK: query: create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) stored as orc tblproperties("orc.stripe.size"="16777216")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@newtypesorc
+POSTHOOK: query: create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) stored as orc tblproperties("orc.stripe.size"="16777216")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@newtypesorc
+PREHOOK: query: insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@newtypesorc
+POSTHOOK: query: insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@newtypesorc
+POSTHOOK: Lineage: newtypesorc.c EXPRESSION []
+POSTHOOK: Lineage: newtypesorc.d EXPRESSION []
+POSTHOOK: Lineage: newtypesorc.da EXPRESSION []
+POSTHOOK: Lineage: newtypesorc.v EXPRESSION []
+PREHOOK: query: -- varchar data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
+select sum(hash(*)) from newtypesorc where v="bee"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- varchar data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
+select sum(hash(*)) from newtypesorc where v="bee"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+-252951929000
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v="bee"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v="bee"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+-252951929000
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v!="bee"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v!="bee"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+334427804500
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v!="bee"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v!="bee"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+334427804500
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v<"world"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v<"world"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+-252951929000
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v<"world"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v<"world"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+-252951929000
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v<="world"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v<="world"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+81475875500
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v<="world"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v<="world"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+81475875500
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v="bee   "
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v="bee   "
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v="bee   "
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v="bee   "
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v in ("bee", "orange")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v in ("bee", "orange")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+-252951929000
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v in ("bee", "orange")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v in ("bee", "orange")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+-252951929000
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v in ("bee", "world")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v in ("bee", "world")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+81475875500
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v in ("bee", "world")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v in ("bee", "world")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+81475875500
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v in ("orange")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v in ("orange")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v in ("orange")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v in ("orange")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v between "bee" and "orange"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v between "bee" and "orange"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+-252951929000
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v between "bee" and "orange"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v between "bee" and "orange"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+-252951929000
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v between "bee" and "zombie"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v between "bee" and "zombie"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+81475875500
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v between "bee" and "zombie"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v between "bee" and "zombie"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+81475875500
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v between "orange" and "pine"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v between "orange" and "pine"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: select sum(hash(*)) from newtypesorc where v between "orange" and "pine"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(*)) from newtypesorc where v between "orange" and "pine"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@newtypesorc
+#### A masked pattern was here ####
+NULL


[18/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/multi_insert.q.out b/ql/src/test/results/clientpositive/llap/multi_insert.q.out
new file mode 100644
index 0000000..d8052c6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/multi_insert.q.out
@@ -0,0 +1,2409 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table src_multi1 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table src_multi1 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi1
+PREHOOK: query: create table src_multi2 like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: create table src_multi2 like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_multi2
+PREHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key < 10) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                  Filter Operator
+                    predicate: ((key > 10) and (key < 20)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+12	val_12
+12	val_12
+15	val_15
+15	val_15
+17	val_17
+18	val_18
+18	val_18
+19	val_19
+PREHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key < 10) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                  Filter Operator
+                    predicate: ((key > 10) and (key < 20)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+12	val_12
+12	val_12
+15	val_15
+15	val_15
+17	val_17
+18	val_18
+18	val_18
+19	val_19
+PREHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key < 10) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                  Filter Operator
+                    predicate: ((key > 10) and (key < 20)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+12	val_12
+12	val_12
+15	val_15
+15	val_15
+17	val_17
+18	val_18
+18	val_18
+19	val_19
+PREHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key < 10) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                  Filter Operator
+                    predicate: ((key > 10) and (key < 20)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+12	val_12
+12	val_12
+15	val_15
+15	val_15
+17	val_17
+18	val_18
+18	val_18
+19	val_19
+PREHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
+                    Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string), value (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Forward
+                Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+                Filter Operator
+                  predicate: (KEY._col0 < 10) (type: boolean)
+                  Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_multi1
+                Filter Operator
+                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
+                  Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_multi2
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+2	val_2
+4	val_4
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+12	val_12
+15	val_15
+17	val_17
+18	val_18
+19	val_19
+PREHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
+                    Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string), value (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Forward
+                Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+                Filter Operator
+                  predicate: (KEY._col0 < 10) (type: boolean)
+                  Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_multi1
+                Filter Operator
+                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
+                  Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_multi2
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+2	val_2
+4	val_4
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+12	val_12
+15	val_15
+17	val_17
+18	val_18
+19	val_19
+PREHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
+                    Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string), value (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Forward
+                Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+                Filter Operator
+                  predicate: (KEY._col0 < 10) (type: boolean)
+                  Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_multi1
+                Filter Operator
+                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
+                  Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_multi2
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+2	val_2
+4	val_4
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+12	val_12
+15	val_15
+17	val_17
+18	val_18
+19	val_19
+PREHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
+                    Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string), value (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Forward
+                Statistics: Num rows: 221 Data size: 39338 Basic stats: COMPLETE Column stats: COMPLETE
+                Filter Operator
+                  predicate: (KEY._col0 < 10) (type: boolean)
+                  Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 73 Data size: 12994 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_multi1
+                Filter Operator
+                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
+                  Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 24 Data size: 4272 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.src_multi2
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from src
+insert overwrite table src_multi1 select * where key < 10 group by key, value
+insert overwrite table src_multi2 select * where key > 10 and key < 20 group by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+2	val_2
+4	val_4
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+12	val_12
+15	val_15
+17	val_17
+18	val_18
+19	val_19
+PREHOOK: query: explain
+from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 3 <- Union 2 (CONTAINS)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: (_col0 < 10) (type: boolean)
+                      Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                    Filter Operator
+                      predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
+                      Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: (_col0 < 10) (type: boolean)
+                      Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                    Filter Operator
+                      predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
+                      Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+2	val_2
+4	val_4
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+8	val_8
+9	val_9
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+11	val_11
+12	val_12
+12	val_12
+12	val_12
+12	val_12
+15	val_15
+15	val_15
+15	val_15
+15	val_15
+17	val_17
+17	val_17
+18	val_18
+18	val_18
+18	val_18
+18	val_18
+19	val_19
+19	val_19
+PREHOOK: query: explain
+from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 3 <- Union 2 (CONTAINS)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: (_col0 < 10) (type: boolean)
+                      Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                    Filter Operator
+                      predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
+                      Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: (_col0 < 10) (type: boolean)
+                      Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                    Filter Operator
+                      predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
+                      Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+2	val_2
+4	val_4
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+8	val_8
+9	val_9
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+11	val_11
+12	val_12
+12	val_12
+12	val_12
+12	val_12
+15	val_15
+15	val_15
+15	val_15
+15	val_15
+17	val_17
+17	val_17
+18	val_18
+18	val_18
+18	val_18
+18	val_18
+19	val_19
+19	val_19
+PREHOOK: query: explain
+from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 3 <- Union 2 (CONTAINS)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: (_col0 < 10) (type: boolean)
+                      Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                    Filter Operator
+                      predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
+                      Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: (_col0 < 10) (type: boolean)
+                      Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                    Filter Operator
+                      predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
+                      Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+2	val_2
+4	val_4
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+8	val_8
+9	val_9
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+11	val_11
+12	val_12
+12	val_12
+12	val_12
+12	val_12
+15	val_15
+15	val_15
+15	val_15
+15	val_15
+17	val_17
+17	val_17
+18	val_18
+18	val_18
+18	val_18
+18	val_18
+19	val_19
+19	val_19
+PREHOOK: query: explain
+from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 3 <- Union 2 (CONTAINS)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: (_col0 < 10) (type: boolean)
+                      Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                    Filter Operator
+                      predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
+                      Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Filter Operator
+                      predicate: (_col0 < 10) (type: boolean)
+                      Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 333 Data size: 59274 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi1
+                    Filter Operator
+                      predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
+                      Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 111 Data size: 19758 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.src_multi2
+            Execution mode: llap
+            LLAP IO: no inputs
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_multi2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_multi1
+PREHOOK: Output: default@src_multi2
+POSTHOOK: query: from (select * from src  union all select * from src) s
+insert overwrite table src_multi1 select * where key < 10
+insert overwrite table src_multi2 select * where key > 10 and key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_multi1
+POSTHOOK: Output: default@src_multi2
+POSTHOOK: Lineage: src_multi1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_multi2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_multi1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+2	val_2
+4	val_4
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+8	val_8
+9	val_9
+9	val_9
+PREHOOK: query: select * from src_multi2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_multi2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_multi2
+#### A masked pattern was here ####
+11	val_11
+11	val_11
+12	val_12
+12	val_12
+12	val_12
+12	val_12
+15	val_15
+15	val_15
+15	val_15
+15	val_15
+17	val_17
+17	val_17
+18	val_18
+18	val_18
+18	val_18
+18	val_18
+19	val_19
+19	val_19
+#### A masked pattern was here ####
+PREHOOK: query: explain
+from src 
+#### A masked pattern was here ####
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src 
+#### A masked pattern was here ####
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-4 depends on stages: Stage-3
+  Stage-0 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-3
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 0) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  Filter Operator
+                    predicate: (key = 2) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  Filter Operator
+                    predicate: (key = 4) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            LLAP IO: no inputs
+
+  Stage: Stage-4
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      files:
+          hdfs directory: false
+#### A masked pattern was here ####
+
+  Stage: Stage-1
+    Move Operator
+      files:
+          hdfs directory: false
+#### A masked pattern was here ####
+
+  Stage: Stage-2
+    Move Operator
+      files:
+          hdfs directory: false
+#### A masked pattern was here ####
+
+PREHOOK: query: from src 
+#### A masked pattern was here ####
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: from src 
+#### A masked pattern was here ####
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: explain
+from src 
+#### A masked pattern was here ####
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src 
+#### A masked pattern was here ####
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-4 depends on stages: Stage-3
+  Stage-0 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-3
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 0) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat

<TRUNCATED>

[07/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out
new file mode 100644
index 0000000..daf2497
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out
@@ -0,0 +1,1140 @@
+PREHOOK: query: -- This test verifies that the sort merge join optimizer works when the tables are sorted on columns which is a superset
+-- of join columns
+
+-- Create bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: -- This test verifies that the sort merge join optimizer works when the tables are sorted on columns which is a superset
+-- of join columns
+
+-- Create bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 SELECT *
+INSERT OVERWRITE TABLE test_table2 SELECT *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1
+POSTHOOK: Output: default@test_table2
+POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- it should be converted to a sort-merge join, since the first sort column (#join columns = 1) contains the join columns
+EXPLAIN EXTENDED
+SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key ORDER BY a.key LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: -- it should be converted to a sort-merge join, since the first sort column (#join columns = 1) contains the join columns
+EXPLAIN EXTENDED
+SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key ORDER BY a.key LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: test_table1
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    SORTBUCKETCOLSPREFIX TRUE
+                    bucket_count 16
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.test_table1
+                    numFiles 16
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct test_table1 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                      SORTBUCKETCOLSPREFIX TRUE
+                      bucket_count 16
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.test_table1
+                      numFiles 16
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct test_table1 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.test_table1
+                  name: default.test_table1
+            Truncated Path -> Alias:
+              /test_table1 [a]
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: test_table2
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    SORTBUCKETCOLSPREFIX TRUE
+                    bucket_count 16
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.test_table2
+                    numFiles 16
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct test_table2 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                      SORTBUCKETCOLSPREFIX TRUE
+                      bucket_count 16
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.test_table2
+                      numFiles 16
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct test_table2 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.test_table2
+                  name: default.test_table2
+            Truncated Path -> Alias:
+              /test_table2 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Position of Big Table: 0
+                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    null sort order: a
+                    sort order: +
+                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                    tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: string)
+                    auto parallelism: false
+        Reducer 3 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: int), VALUE._col2 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types int:string:int:string
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.escape.crlf true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key ORDER BY a.key LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key ORDER BY a.key LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+2	val_2	2	val_2
+PREHOOK: query: DROP TABLE test_table1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test_table1
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: DROP TABLE test_table1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: DROP TABLE test_table2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test_table2
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: DROP TABLE test_table2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: -- Create bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: -- Create bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: CREATE TABLE test_table2 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: CREATE TABLE test_table2 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 SELECT key, key, value
+INSERT OVERWRITE TABLE test_table2 SELECT key, key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 SELECT key, key, value
+INSERT OVERWRITE TABLE test_table2 SELECT key, key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1
+POSTHOOK: Output: default@test_table2
+POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1.key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2.key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- it should be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) contain the join columns
+EXPLAIN EXTENDED
+SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 ORDER BY a.key LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: -- it should be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) contain the join columns
+EXPLAIN EXTENDED
+SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 ORDER BY a.key LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int), key2 (type: int)
+                      null sort order: aa
+                      sort order: ++
+                      Map-reduce partition columns: key (type: int), key2 (type: int)
+                      Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: test_table1
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    SORTBUCKETCOLSPREFIX TRUE
+                    bucket_count 16
+                    bucket_field_name key
+                    columns key,key2,value
+                    columns.comments 
+                    columns.types int:int:string
+#### A masked pattern was here ####
+                    name default.test_table1
+                    numFiles 16
+                    numRows 500
+                    rawDataSize 7218
+                    serialization.ddl struct test_table1 { i32 key, i32 key2, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 7718
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                      SORTBUCKETCOLSPREFIX TRUE
+                      bucket_count 16
+                      bucket_field_name key
+                      columns key,key2,value
+                      columns.comments 
+                      columns.types int:int:string
+#### A masked pattern was here ####
+                      name default.test_table1
+                      numFiles 16
+                      numRows 500
+                      rawDataSize 7218
+                      serialization.ddl struct test_table1 { i32 key, i32 key2, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 7718
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.test_table1
+                  name: default.test_table1
+            Truncated Path -> Alias:
+              /test_table1 [a]
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int), key2 (type: int)
+                      null sort order: aa
+                      sort order: ++
+                      Map-reduce partition columns: key (type: int), key2 (type: int)
+                      Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: test_table2
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    SORTBUCKETCOLSPREFIX TRUE
+                    bucket_count 16
+                    bucket_field_name key
+                    columns key,key2,value
+                    columns.comments 
+                    columns.types int:int:string
+#### A masked pattern was here ####
+                    name default.test_table2
+                    numFiles 16
+                    numRows 500
+                    rawDataSize 7218
+                    serialization.ddl struct test_table2 { i32 key, i32 key2, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 7718
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                      SORTBUCKETCOLSPREFIX TRUE
+                      bucket_count 16
+                      bucket_field_name key
+                      columns key,key2,value
+                      columns.comments 
+                      columns.types int:int:string
+#### A masked pattern was here ####
+                      name default.test_table2
+                      numFiles 16
+                      numRows 500
+                      rawDataSize 7218
+                      serialization.ddl struct test_table2 { i32 key, i32 key2, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 7718
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.test_table2
+                  name: default.test_table2
+            Truncated Path -> Alias:
+              /test_table2 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int), key2 (type: int)
+                  1 key (type: int), key2 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col8
+                Position of Big Table: 0
+                Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), _col6 (type: int), _col7 (type: int), _col8 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    null sort order: a
+                    sort order: +
+                    Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                    tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int), _col5 (type: string)
+                    auto parallelism: false
+        Reducer 3 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int), VALUE._col1 (type: string), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 140 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 10 Data size: 140 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3,_col4,_col5
+                          columns.types int:int:string:int:int:string
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.escape.crlf true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 ORDER BY a.key LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 ORDER BY a.key LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+2	2	val_2	2	2	val_2
+PREHOOK: query: -- it should be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) contain the join columns
+-- even if the order is not the same
+EXPLAIN EXTENDED
+SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key2 = b.key2 and a.key = b.key ORDER BY a.key LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: -- it should be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) contain the join columns
+-- even if the order is not the same
+EXPLAIN EXTENDED
+SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key2 = b.key2 and a.key = b.key ORDER BY a.key LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (key2 is not null and key is not null) (type: boolean)
+                    Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key2 (type: int), key (type: int)
+                      null sort order: aa
+                      sort order: ++
+                      Map-reduce partition columns: key2 (type: int), key (type: int)
+                      Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: test_table1
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    SORTBUCKETCOLSPREFIX TRUE
+                    bucket_count 16
+                    bucket_field_name key
+                    columns key,key2,value
+                    columns.comments 
+                    columns.types int:int:string
+#### A masked pattern was here ####
+                    name default.test_table1
+                    numFiles 16
+                    numRows 500
+                    rawDataSize 7218
+                    serialization.ddl struct test_table1 { i32 key, i32 key2, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 7718
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                      SORTBUCKETCOLSPREFIX TRUE
+                      bucket_count 16
+                      bucket_field_name key
+                      columns key,key2,value
+                      columns.comments 
+                      columns.types int:int:string
+#### A masked pattern was here ####
+                      name default.test_table1
+                      numFiles 16
+                      numRows 500
+                      rawDataSize 7218
+                      serialization.ddl struct test_table1 { i32 key, i32 key2, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 7718
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.test_table1
+                  name: default.test_table1
+            Truncated Path -> Alias:
+              /test_table1 [a]
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (key2 is not null and key is not null) (type: boolean)
+                    Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key2 (type: int), key (type: int)
+                      null sort order: aa
+                      sort order: ++
+                      Map-reduce partition columns: key2 (type: int), key (type: int)
+                      Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: test_table2
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    SORTBUCKETCOLSPREFIX TRUE
+                    bucket_count 16
+                    bucket_field_name key
+                    columns key,key2,value
+                    columns.comments 
+                    columns.types int:int:string
+#### A masked pattern was here ####
+                    name default.test_table2
+                    numFiles 16
+                    numRows 500
+                    rawDataSize 7218
+                    serialization.ddl struct test_table2 { i32 key, i32 key2, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 7718
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                      SORTBUCKETCOLSPREFIX TRUE
+                      bucket_count 16
+                      bucket_field_name key
+                      columns key,key2,value
+                      columns.comments 
+                      columns.types int:int:string
+#### A masked pattern was here ####
+                      name default.test_table2
+                      numFiles 16
+                      numRows 500
+                      rawDataSize 7218
+                      serialization.ddl struct test_table2 { i32 key, i32 key2, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 7718
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.test_table2
+                  name: default.test_table2
+            Truncated Path -> Alias:
+              /test_table2 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key2 (type: int), key (type: int)
+                  1 key2 (type: int), key (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col8
+                Position of Big Table: 0
+                Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), _col6 (type: int), _col7 (type: int), _col8 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    null sort order: a
+                    sort order: +
+                    Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                    tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int), _col5 (type: string)
+                    auto parallelism: false
+        Reducer 3 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int), VALUE._col1 (type: string), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 140 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 10 Data size: 140 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3,_col4,_col5
+                          columns.types int:int:string:int:int:string
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.escape.crlf true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key2 = b.key2 and a.key = b.key ORDER BY a.key LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key2 = b.key2 and a.key = b.key ORDER BY a.key LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+2	2	val_2	2	2	val_2
+PREHOOK: query: -- it should not be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) do not contain all 
+-- the join columns
+EXPLAIN EXTENDED
+SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.value = b.value ORDER BY a.key LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: -- it should not be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) do not contain all 
+-- the join columns
+EXPLAIN EXTENDED
+SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.value = b.value ORDER BY a.key LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int), value (type: string)
+                      null sort order: aa
+                      sort order: ++
+                      Map-reduce partition columns: key (type: int), value (type: string)
+                      Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: key2 (type: int)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: test_table1
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    SORTBUCKETCOLSPREFIX TRUE
+                    bucket_count 16
+                    bucket_field_name key
+                    columns key,key2,value
+                    columns.comments 
+                    columns.types int:int:string
+#### A masked pattern was here ####
+                    name default.test_table1
+                    numFiles 16
+                    numRows 500
+                    rawDataSize 7218
+                    serialization.ddl struct test_table1 { i32 key, i32 key2, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 7718
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                      SORTBUCKETCOLSPREFIX TRUE
+                      bucket_count 16
+                      bucket_field_name key
+                      columns key,key2,value
+                      columns.comments 
+                      columns.types int:int:string
+#### A masked pattern was here ####
+                      name default.test_table1
+                      numFiles 16
+                      numRows 500
+                      rawDataSize 7218
+                      serialization.ddl struct test_table1 { i32 key, i32 key2, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 7718
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.test_table1
+                  name: default.test_table1
+            Truncated Path -> Alias:
+              /test_table1 [a]
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int), value (type: string)
+                      null sort order: aa
+                      sort order: ++
+                      Map-reduce partition columns: key (type: int), value (type: string)
+                      Statistics: Num rows: 500 Data size: 7218 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: key2 (type: int)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: test_table2
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    SORTBUCKETCOLSPREFIX TRUE
+                    bucket_count 16
+                    bucket_field_name key
+                    columns key,key2,value
+                    columns.comments 
+                    columns.types int:int:string
+#### A masked pattern was here ####
+                    name default.test_table2
+                    numFiles 16
+                    numRows 500
+                    rawDataSize 7218
+                    serialization.ddl struct test_table2 { i32 key, i32 key2, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 7718
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                      SORTBUCKETCOLSPREFIX TRUE
+                      bucket_count 16
+                      bucket_field_name key
+                      columns key,key2,value
+                      columns.comments 
+                      columns.types int:int:string
+#### A masked pattern was here ####
+                      name default.test_table2
+                      numFiles 16
+                      numRows 500
+                      rawDataSize 7218
+                      serialization.ddl struct test_table2 { i32 key, i32 key2, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 7718
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.test_table2
+                  name: default.test_table2
+            Truncated Path -> Alias:
+              /test_table2 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int), value (type: string)
+                  1 key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col8
+                Position of Big Table: 0
+                Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), _col6 (type: int), _col7 (type: int), _col8 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    null sort order: a
+                    sort order: +
+                    Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                    tag: -1
+                    TopN: 10
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: int), _col2 (type: string), _col3 (type: int), _col4 (type: int), _col5 (type: string)
+                    auto parallelism: false
+        Reducer 3 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int), VALUE._col1 (type: string), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Statistics: Num rows: 550 Data size: 7939 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 140 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 10 Data size: 140 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3,_col4,_col5
+                          columns.types int:int:string:int:int:string
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.escape.crlf true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.value = b.value ORDER BY a.key LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.value = b.value ORDER BY a.key LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+0	0	val_0	0	0	val_0
+2	2	val_2	2	2	val_2
+PREHOOK: query: DROP TABLE test_table1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test_table1
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: DROP TABLE test_table1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: DROP TABLE test_table2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test_table2
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: DROP TABLE test_table2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Output: default@test_table2


[10/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out b/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out
new file mode 100644
index 0000000..99a7119
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/reduce_deduplicate_extended.q.out
@@ -0,0 +1,5122 @@
+PREHOOK: query: -- HIVE-2340 deduplicate RS followed by RS
+-- hive.optimize.reducededuplication : whether using this optimization
+-- hive.optimize.reducededuplication.min.reducer : number of reducer of deduped RS should be this at least
+
+-- RS-mGBY-RS-rGBY
+explain select key, sum(key) from (select * from src distribute by key sort by key, value) Q1 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- HIVE-2340 deduplicate RS followed by RS
+-- hive.optimize.reducededuplication : whether using this optimization
+-- hive.optimize.reducededuplication.min.reducer : number of reducer of deduped RS should be this at least
+
+-- RS-mGBY-RS-rGBY
+explain select key, sum(key) from (select * from src distribute by key sort by key, value) Q1 group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: sum(_col0)
+                  keys: _col0 (type: string)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select key, sum(key), lower(value) from (select * from src order by key) Q1 group by key, lower(value)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, sum(key), lower(value) from (select * from src order by key) Q1 group by key, lower(value)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), lower(_col1) (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), lower(_col1) (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), lower(VALUE._col0) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: sum(_col0)
+                  keys: _col0 (type: string), _col1 (type: string)
+                  mode: complete
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double), _col1 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select key, sum(key), (X + 1) from (select key, (value + 1) as X from src order by key) Q1 group by key, (X + 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, sum(key), (X + 1) from (select key, (value + 1) as X from src order by key) Q1 group by key, (X + 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), (UDFToDouble(value) + 1.0) (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), (_col1 + 1.0) (type: double)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), (_col1 + 1.0) (type: double)
+                      Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), (VALUE._col0 + 1.0) (type: double)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: sum(_col0)
+                  keys: _col0 (type: string), _col1 (type: double)
+                  mode: complete
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double), _col1 (type: double)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- mGBY-RS-rGBY-RS
+explain select key, sum(key) as value from src group by key order by key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- mGBY-RS-rGBY-RS
+explain select key, sum(key) as value from src group by key order by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: sum(key)
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- RS-JOIN-mGBY-RS-rGBY
+explain select src.key, sum(src.key) FROM src JOIN src1 ON src.key = src1.key group by src.key, src.value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- RS-JOIN-mGBY-RS-rGBY
+explain select src.key, sum(src.key) FROM src JOIN src1 ON src.key = src1.key group by src.key, src.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: sum(_col0)
+                  keys: _col0 (type: string), _col1 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 30 Data size: 5580 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string), _col1 (type: string)
+                    sort order: ++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                    Statistics: Num rows: 30 Data size: 5580 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col2 (type: double)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 30 Data size: 5580 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string), _col2 (type: double)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 30 Data size: 2850 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 30 Data size: 2850 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- RS-JOIN-RS
+explain select src.key, src.value FROM src JOIN src1 ON src.key = src1.key order by src.key, src.value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- RS-JOIN-RS
+explain select src.key, src.value FROM src JOIN src1 ON src.key = src1.key order by src.key, src.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- mGBY-RS-rGBY-mGBY-RS-rGBY
+explain from (select key, value from src group by key, value) s select s.key group by s.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- mGBY-RS-rGBY-mGBY-RS-rGBY
+explain from (select key, value from src group by key, value) s select s.key group by s.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: key (type: string), value (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: _col0 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 205 Data size: 17835 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 205 Data size: 17835 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select key, count(distinct value) from (select key, value from src group by key, value) t group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, count(distinct value) from (select key, value from src group by key, value) t group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: key (type: string), value (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  keys: _col0 (type: string), _col1 (type: string)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 250 Data size: 44500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: count(_col1)
+                    keys: _col0 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key, sum(key) from (select * from src distribute by key sort by key, value) Q1 group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, sum(key) from (select * from src distribute by key sort by key, value) Q1 group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0.0
+10	10.0
+100	200.0
+103	206.0
+104	208.0
+105	105.0
+11	11.0
+111	111.0
+113	226.0
+114	114.0
+116	116.0
+118	236.0
+119	357.0
+12	24.0
+120	240.0
+125	250.0
+126	126.0
+128	384.0
+129	258.0
+131	131.0
+133	133.0
+134	268.0
+136	136.0
+137	274.0
+138	552.0
+143	143.0
+145	145.0
+146	292.0
+149	298.0
+15	30.0
+150	150.0
+152	304.0
+153	153.0
+155	155.0
+156	156.0
+157	157.0
+158	158.0
+160	160.0
+162	162.0
+163	163.0
+164	328.0
+165	330.0
+166	166.0
+167	501.0
+168	168.0
+169	676.0
+17	17.0
+170	170.0
+172	344.0
+174	348.0
+175	350.0
+176	352.0
+177	177.0
+178	178.0
+179	358.0
+18	36.0
+180	180.0
+181	181.0
+183	183.0
+186	186.0
+187	561.0
+189	189.0
+19	19.0
+190	190.0
+191	382.0
+192	192.0
+193	579.0
+194	194.0
+195	390.0
+196	196.0
+197	394.0
+199	597.0
+2	2.0
+20	20.0
+200	400.0
+201	201.0
+202	202.0
+203	406.0
+205	410.0
+207	414.0
+208	624.0
+209	418.0
+213	426.0
+214	214.0
+216	432.0
+217	434.0
+218	218.0
+219	438.0
+221	442.0
+222	222.0
+223	446.0
+224	448.0
+226	226.0
+228	228.0
+229	458.0
+230	1150.0
+233	466.0
+235	235.0
+237	474.0
+238	476.0
+239	478.0
+24	48.0
+241	241.0
+242	484.0
+244	244.0
+247	247.0
+248	248.0
+249	249.0
+252	252.0
+255	510.0
+256	512.0
+257	257.0
+258	258.0
+26	52.0
+260	260.0
+262	262.0
+263	263.0
+265	530.0
+266	266.0
+27	27.0
+272	544.0
+273	819.0
+274	274.0
+275	275.0
+277	1108.0
+278	556.0
+28	28.0
+280	560.0
+281	562.0
+282	564.0
+283	283.0
+284	284.0
+285	285.0
+286	286.0
+287	287.0
+288	576.0
+289	289.0
+291	291.0
+292	292.0
+296	296.0
+298	894.0
+30	30.0
+302	302.0
+305	305.0
+306	306.0
+307	614.0
+308	308.0
+309	618.0
+310	310.0
+311	933.0
+315	315.0
+316	948.0
+317	634.0
+318	954.0
+321	642.0
+322	644.0
+323	323.0
+325	650.0
+327	981.0
+33	33.0
+331	662.0
+332	332.0
+333	666.0
+335	335.0
+336	336.0
+338	338.0
+339	339.0
+34	34.0
+341	341.0
+342	684.0
+344	688.0
+345	345.0
+348	1740.0
+35	105.0
+351	351.0
+353	706.0
+356	356.0
+360	360.0
+362	362.0
+364	364.0
+365	365.0
+366	366.0
+367	734.0
+368	368.0
+369	1107.0
+37	74.0
+373	373.0
+374	374.0
+375	375.0
+377	377.0
+378	378.0
+379	379.0
+382	764.0
+384	1152.0
+386	386.0
+389	389.0
+392	392.0
+393	393.0
+394	394.0
+395	790.0
+396	1188.0
+397	794.0
+399	798.0
+4	4.0
+400	400.0
+401	2005.0
+402	402.0
+403	1209.0
+404	808.0
+406	1624.0
+407	407.0
+409	1227.0
+41	41.0
+411	411.0
+413	826.0
+414	828.0
+417	1251.0
+418	418.0
+419	419.0
+42	84.0
+421	421.0
+424	848.0
+427	427.0
+429	858.0
+43	43.0
+430	1290.0
+431	1293.0
+432	432.0
+435	435.0
+436	436.0
+437	437.0
+438	1314.0
+439	878.0
+44	44.0
+443	443.0
+444	444.0
+446	446.0
+448	448.0
+449	449.0
+452	452.0
+453	453.0
+454	1362.0
+455	455.0
+457	457.0
+458	916.0
+459	918.0
+460	460.0
+462	924.0
+463	926.0
+466	1398.0
+467	467.0
+468	1872.0
+469	2345.0
+47	47.0
+470	470.0
+472	472.0
+475	475.0
+477	477.0
+478	956.0
+479	479.0
+480	1440.0
+481	481.0
+482	482.0
+483	483.0
+484	484.0
+485	485.0
+487	487.0
+489	1956.0
+490	490.0
+491	491.0
+492	984.0
+493	493.0
+494	494.0
+495	495.0
+496	496.0
+497	497.0
+498	1494.0
+5	15.0
+51	102.0
+53	53.0
+54	54.0
+57	57.0
+58	116.0
+64	64.0
+65	65.0
+66	66.0
+67	134.0
+69	69.0
+70	210.0
+72	144.0
+74	74.0
+76	152.0
+77	77.0
+78	78.0
+8	8.0
+80	80.0
+82	82.0
+83	166.0
+84	168.0
+85	85.0
+86	86.0
+87	87.0
+9	9.0
+90	270.0
+92	92.0
+95	190.0
+96	96.0
+97	194.0
+98	196.0
+PREHOOK: query: select key, sum(key), lower(value) from (select * from src order by key) Q1 group by key, lower(value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, sum(key), lower(value) from (select * from src order by key) Q1 group by key, lower(value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0.0	val_0
+10	10.0	val_10
+100	200.0	val_100
+103	206.0	val_103
+104	208.0	val_104
+105	105.0	val_105
+11	11.0	val_11
+111	111.0	val_111
+113	226.0	val_113
+114	114.0	val_114
+116	116.0	val_116
+118	236.0	val_118
+119	357.0	val_119
+12	24.0	val_12
+120	240.0	val_120
+125	250.0	val_125
+126	126.0	val_126
+128	384.0	val_128
+129	258.0	val_129
+131	131.0	val_131
+133	133.0	val_133
+134	268.0	val_134
+136	136.0	val_136
+137	274.0	val_137
+138	552.0	val_138
+143	143.0	val_143
+145	145.0	val_145
+146	292.0	val_146
+149	298.0	val_149
+15	30.0	val_15
+150	150.0	val_150
+152	304.0	val_152
+153	153.0	val_153
+155	155.0	val_155
+156	156.0	val_156
+157	157.0	val_157
+158	158.0	val_158
+160	160.0	val_160
+162	162.0	val_162
+163	163.0	val_163
+164	328.0	val_164
+165	330.0	val_165
+166	166.0	val_166
+167	501.0	val_167
+168	168.0	val_168
+169	676.0	val_169
+17	17.0	val_17
+170	170.0	val_170
+172	344.0	val_172
+174	348.0	val_174
+175	350.0	val_175
+176	352.0	val_176
+177	177.0	val_177
+178	178.0	val_178
+179	358.0	val_179
+18	36.0	val_18
+180	180.0	val_180
+181	181.0	val_181
+183	183.0	val_183
+186	186.0	val_186
+187	561.0	val_187
+189	189.0	val_189
+19	19.0	val_19
+190	190.0	val_190
+191	382.0	val_191
+192	192.0	val_192
+193	579.0	val_193
+194	194.0	val_194
+195	390.0	val_195
+196	196.0	val_196
+197	394.0	val_197
+199	597.0	val_199
+2	2.0	val_2
+20	20.0	val_20
+200	400.0	val_200
+201	201.0	val_201
+202	202.0	val_202
+203	406.0	val_203
+205	410.0	val_205
+207	414.0	val_207
+208	624.0	val_208
+209	418.0	val_209
+213	426.0	val_213
+214	214.0	val_214
+216	432.0	val_216
+217	434.0	val_217
+218	218.0	val_218
+219	438.0	val_219
+221	442.0	val_221
+222	222.0	val_222
+223	446.0	val_223
+224	448.0	val_224
+226	226.0	val_226
+228	228.0	val_228
+229	458.0	val_229
+230	1150.0	val_230
+233	466.0	val_233
+235	235.0	val_235
+237	474.0	val_237
+238	476.0	val_238
+239	478.0	val_239
+24	48.0	val_24
+241	241.0	val_241
+242	484.0	val_242
+244	244.0	val_244
+247	247.0	val_247
+248	248.0	val_248
+249	249.0	val_249
+252	252.0	val_252
+255	510.0	val_255
+256	512.0	val_256
+257	257.0	val_257
+258	258.0	val_258
+26	52.0	val_26
+260	260.0	val_260
+262	262.0	val_262
+263	263.0	val_263
+265	530.0	val_265
+266	266.0	val_266
+27	27.0	val_27
+272	544.0	val_272
+273	819.0	val_273
+274	274.0	val_274
+275	275.0	val_275
+277	1108.0	val_277
+278	556.0	val_278
+28	28.0	val_28
+280	560.0	val_280
+281	562.0	val_281
+282	564.0	val_282
+283	283.0	val_283
+284	284.0	val_284
+285	285.0	val_285
+286	286.0	val_286
+287	287.0	val_287
+288	576.0	val_288
+289	289.0	val_289
+291	291.0	val_291
+292	292.0	val_292
+296	296.0	val_296
+298	894.0	val_298
+30	30.0	val_30
+302	302.0	val_302
+305	305.0	val_305
+306	306.0	val_306
+307	614.0	val_307
+308	308.0	val_308
+309	618.0	val_309
+310	310.0	val_310
+311	933.0	val_311
+315	315.0	val_315
+316	948.0	val_316
+317	634.0	val_317
+318	954.0	val_318
+321	642.0	val_321
+322	644.0	val_322
+323	323.0	val_323
+325	650.0	val_325
+327	981.0	val_327
+33	33.0	val_33
+331	662.0	val_331
+332	332.0	val_332
+333	666.0	val_333
+335	335.0	val_335
+336	336.0	val_336
+338	338.0	val_338
+339	339.0	val_339
+34	34.0	val_34
+341	341.0	val_341
+342	684.0	val_342
+344	688.0	val_344
+345	345.0	val_345
+348	1740.0	val_348
+35	105.0	val_35
+351	351.0	val_351
+353	706.0	val_353
+356	356.0	val_356
+360	360.0	val_360
+362	362.0	val_362
+364	364.0	val_364
+365	365.0	val_365
+366	366.0	val_366
+367	734.0	val_367
+368	368.0	val_368
+369	1107.0	val_369
+37	74.0	val_37
+373	373.0	val_373
+374	374.0	val_374
+375	375.0	val_375
+377	377.0	val_377
+378	378.0	val_378
+379	379.0	val_379
+382	764.0	val_382
+384	1152.0	val_384
+386	386.0	val_386
+389	389.0	val_389
+392	392.0	val_392
+393	393.0	val_393
+394	394.0	val_394
+395	790.0	val_395
+396	1188.0	val_396
+397	794.0	val_397
+399	798.0	val_399
+4	4.0	val_4
+400	400.0	val_400
+401	2005.0	val_401
+402	402.0	val_402
+403	1209.0	val_403
+404	808.0	val_404
+406	1624.0	val_406
+407	407.0	val_407
+409	1227.0	val_409
+41	41.0	val_41
+411	411.0	val_411
+413	826.0	val_413
+414	828.0	val_414
+417	1251.0	val_417
+418	418.0	val_418
+419	419.0	val_419
+42	84.0	val_42
+421	421.0	val_421
+424	848.0	val_424
+427	427.0	val_427
+429	858.0	val_429
+43	43.0	val_43
+430	1290.0	val_430
+431	1293.0	val_431
+432	432.0	val_432
+435	435.0	val_435
+436	436.0	val_436
+437	437.0	val_437
+438	1314.0	val_438
+439	878.0	val_439
+44	44.0	val_44
+443	443.0	val_443
+444	444.0	val_444
+446	446.0	val_446
+448	448.0	val_448
+449	449.0	val_449
+452	452.0	val_452
+453	453.0	val_453
+454	1362.0	val_454
+455	455.0	val_455
+457	457.0	val_457
+458	916.0	val_458
+459	918.0	val_459
+460	460.0	val_460
+462	924.0	val_462
+463	926.0	val_463
+466	1398.0	val_466
+467	467.0	val_467
+468	1872.0	val_468
+469	2345.0	val_469
+47	47.0	val_47
+470	470.0	val_470
+472	472.0	val_472
+475	475.0	val_475
+477	477.0	val_477
+478	956.0	val_478
+479	479.0	val_479
+480	1440.0	val_480
+481	481.0	val_481
+482	482.0	val_482
+483	483.0	val_483
+484	484.0	val_484
+485	485.0	val_485
+487	487.0	val_487
+489	1956.0	val_489
+490	490.0	val_490
+491	491.0	val_491
+492	984.0	val_492
+493	493.0	val_493
+494	494.0	val_494
+495	495.0	val_495
+496	496.0	val_496
+497	497.0	val_497
+498	1494.0	val_498
+5	15.0	val_5
+51	102.0	val_51
+53	53.0	val_53
+54	54.0	val_54
+57	57.0	val_57
+58	116.0	val_58
+64	64.0	val_64
+65	65.0	val_65
+66	66.0	val_66
+67	134.0	val_67
+69	69.0	val_69
+70	210.0	val_70
+72	144.0	val_72
+74	74.0	val_74
+76	152.0	val_76
+77	77.0	val_77
+78	78.0	val_78
+8	8.0	val_8
+80	80.0	val_80
+82	82.0	val_82
+83	166.0	val_83
+84	168.0	val_84
+85	85.0	val_85
+86	86.0	val_86
+87	87.0	val_87
+9	9.0	val_9
+90	270.0	val_90
+92	92.0	val_92
+95	190.0	val_95
+96	96.0	val_96
+97	194.0	val_97
+98	196.0	val_98
+PREHOOK: query: select key, sum(key), (X + 1) from (select key, (value + 1) as X from src order by key) Q1 group by key, (X + 1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, sum(key), (X + 1) from (select key, (value + 1) as X from src order by key) Q1 group by key, (X + 1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0.0	NULL
+10	10.0	NULL
+100	200.0	NULL
+103	206.0	NULL
+104	208.0	NULL
+105	105.0	NULL
+11	11.0	NULL
+111	111.0	NULL
+113	226.0	NULL
+114	114.0	NULL
+116	116.0	NULL
+118	236.0	NULL
+119	357.0	NULL
+12	24.0	NULL
+120	240.0	NULL
+125	250.0	NULL
+126	126.0	NULL
+128	384.0	NULL
+129	258.0	NULL
+131	131.0	NULL
+133	133.0	NULL
+134	268.0	NULL
+136	136.0	NULL
+137	274.0	NULL
+138	552.0	NULL
+143	143.0	NULL
+145	145.0	NULL
+146	292.0	NULL
+149	298.0	NULL
+15	30.0	NULL
+150	150.0	NULL
+152	304.0	NULL
+153	153.0	NULL
+155	155.0	NULL
+156	156.0	NULL
+157	157.0	NULL
+158	158.0	NULL
+160	160.0	NULL
+162	162.0	NULL
+163	163.0	NULL
+164	328.0	NULL
+165	330.0	NULL
+166	166.0	NULL
+167	501.0	NULL
+168	168.0	NULL
+169	676.0	NULL
+17	17.0	NULL
+170	170.0	NULL
+172	344.0	NULL
+174	348.0	NULL
+175	350.0	NULL
+176	352.0	NULL
+177	177.0	NULL
+178	178.0	NULL
+179	358.0	NULL
+18	36.0	NULL
+180	180.0	NULL
+181	181.0	NULL
+183	183.0	NULL
+186	186.0	NULL
+187	561.0	NULL
+189	189.0	NULL
+19	19.0	NULL
+190	190.0	NULL
+191	382.0	NULL
+192	192.0	NULL
+193	579.0	NULL
+194	194.0	NULL
+195	390.0	NULL
+196	196.0	NULL
+197	394.0	NULL
+199	597.0	NULL
+2	2.0	NULL
+20	20.0	NULL
+200	400.0	NULL
+201	201.0	NULL
+202	202.0	NULL
+203	406.0	NULL
+205	410.0	NULL
+207	414.0	NULL
+208	624.0	NULL
+209	418.0	NULL
+213	426.0	NULL
+214	214.0	NULL
+216	432.0	NULL
+217	434.0	NULL
+218	218.0	NULL
+219	438.0	NULL
+221	442.0	NULL
+222	222.0	NULL
+223	446.0	NULL
+224	448.0	NULL
+226	226.0	NULL
+228	228.0	NULL
+229	458.0	NULL
+230	1150.0	NULL
+233	466.0	NULL
+235	235.0	NULL
+237	474.0	NULL
+238	476.0	NULL
+239	478.0	NULL
+24	48.0	NULL
+241	241.0	NULL
+242	484.0	NULL
+244	244.0	NULL
+247	247.0	NULL
+248	248.0	NULL
+249	249.0	NULL
+252	252.0	NULL
+255	510.0	NULL
+256	512.0	NULL
+257	257.0	NULL
+258	258.0	NULL
+26	52.0	NULL
+260	260.0	NULL
+262	262.0	NULL
+263	263.0	NULL
+265	530.0	NULL
+266	266.0	NULL
+27	27.0	NULL
+272	544.0	NULL
+273	819.0	NULL
+274	274.0	NULL
+275	275.0	NULL
+277	1108.0	NULL
+278	556.0	NULL
+28	28.0	NULL
+280	560.0	NULL
+281	562.0	NULL
+282	564.0	NULL
+283	283.0	NULL
+284	284.0	NULL
+285	285.0	NULL
+286	286.0	NULL
+287	287.0	NULL
+288	576.0	NULL
+289	289.0	NULL
+291	291.0	NULL
+292	292.0	NULL
+296	296.0	NULL
+298	894.0	NULL
+30	30.0	NULL
+302	302.0	NULL
+305	305.0	NULL
+306	306.0	NULL
+307	614.0	NULL
+308	308.0	NULL
+309	618.0	NULL
+310	310.0	NULL
+311	933.0	NULL
+315	315.0	NULL
+316	948.0	NULL
+317	634.0	NULL
+318	954.0	NULL
+321	642.0	NULL
+322	644.0	NULL
+323	323.0	NULL
+325	650.0	NULL
+327	981.0	NULL
+33	33.0	NULL
+331	662.0	NULL
+332	332.0	NULL
+333	666.0	NULL
+335	335.0	NULL
+336	336.0	NULL
+338	338.0	NULL
+339	339.0	NULL
+34	34.0	NULL
+341	341.0	NULL
+342	684.0	NULL
+344	688.0	NULL
+345	345.0	NULL
+348	1740.0	NULL
+35	105.0	NULL
+351	351.0	NULL
+353	706.0	NULL
+356	356.0	NULL
+360	360.0	NULL
+362	362.0	NULL
+364	364.0	NULL
+365	365.0	NULL
+366	366.0	NULL
+367	734.0	NULL
+368	368.0	NULL
+369	1107.0	NULL
+37	74.0	NULL
+373	373.0	NULL
+374	374.0	NULL
+375	375.0	NULL
+377	377.0	NULL
+378	378.0	NULL
+379	379.0	NULL
+382	764.0	NULL
+384	1152.0	NULL
+386	386.0	NULL
+389	389.0	NULL
+392	392.0	NULL
+393	393.0	NULL
+394	394.0	NULL
+395	790.0	NULL
+396	1188.0	NULL
+397	794.0	NULL
+399	798.0	NULL
+4	4.0	NULL
+400	400.0	NULL
+401	2005.0	NULL
+402	402.0	NULL
+403	1209.0	NULL
+404	808.0	NULL
+406	1624.0	NULL
+407	407.0	NULL
+409	1227.0	NULL
+41	41.0	NULL
+411	411.0	NULL
+413	826.0	NULL
+414	828.0	NULL
+417	1251.0	NULL
+418	418.0	NULL
+419	419.0	NULL
+42	84.0	NULL
+421	421.0	NULL
+424	848.0	NULL
+427	427.0	NULL
+429	858.0	NULL
+43	43.0	NULL
+430	1290.0	NULL
+431	1293.0	NULL
+432	432.0	NULL
+435	435.0	NULL
+436	436.0	NULL
+437	437.0	NULL
+438	1314.0	NULL
+439	878.0	NULL
+44	44.0	NULL
+443	443.0	NULL
+444	444.0	NULL
+446	446.0	NULL
+448	448.0	NULL
+449	449.0	NULL
+452	452.0	NULL
+453	453.0	NULL
+454	1362.0	NULL
+455	455.0	NULL
+457	457.0	NULL
+458	916.0	NULL
+459	918.0	NULL
+460	460.0	NULL
+462	924.0	NULL
+463	926.0	NULL
+466	1398.0	NULL
+467	467.0	NULL
+468	1872.0	NULL
+469	2345.0	NULL
+47	47.0	NULL
+470	470.0	NULL
+472	472.0	NULL
+475	475.0	NULL
+477	477.0	NULL
+478	956.0	NULL
+479	479.0	NULL
+480	1440.0	NULL
+481	481.0	NULL
+482	482.0	NULL
+483	483.0	NULL
+484	484.0	NULL
+485	485.0	NULL
+487	487.0	NULL
+489	1956.0	NULL
+490	490.0	NULL
+491	491.0	NULL
+492	984.0	NULL
+493	493.0	NULL
+494	494.0	NULL
+495	495.0	NULL
+496	496.0	NULL
+497	497.0	NULL
+498	1494.0	NULL
+5	15.0	NULL
+51	102.0	NULL
+53	53.0	NULL
+54	54.0	NULL
+57	57.0	NULL
+58	116.0	NULL
+64	64.0	NULL
+65	65.0	NULL
+66	66.0	NULL
+67	134.0	NULL
+69	69.0	NULL
+70	210.0	NULL
+72	144.0	NULL
+74	74.0	NULL
+76	152.0	NULL
+77	77.0	NULL
+78	78.0	NULL
+8	8.0	NULL
+80	80.0	NULL
+82	82.0	NULL
+83	166.0	NULL
+84	168.0	NULL
+85	85.0	NULL
+86	86.0	NULL
+87	87.0	NULL
+9	9.0	NULL
+90	270.0	NULL
+92	92.0	NULL
+95	190.0	NULL
+96	96.0	NULL
+97	194.0	NULL
+98	196.0	NULL
+PREHOOK: query: select key, sum(key) as value from src group by key order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, sum(key) as value from src group by key order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0.0
+10	10.0
+100	200.0
+103	206.0
+104	208.0
+105	105.0
+11	11.0
+111	111.0
+113	226.0
+114	114.0
+116	116.0
+118	236.0
+119	357.0
+12	24.0
+120	240.0
+125	250.0
+126	126.0
+128	384.0
+129	258.0
+131	131.0
+133	133.0
+134	268.0
+136	136.0
+137	274.0
+138	552.0
+143	143.0
+145	145.0
+146	292.0
+149	298.0
+15	30.0
+150	150.0
+152	304.0
+153	153.0
+155	155.0
+156	156.0
+157	157.0
+158	158.0
+160	160.0
+162	162.0
+163	163.0
+164	328.0
+165	330.0
+166	166.0
+167	501.0
+168	168.0
+169	676.0
+17	17.0
+170	170.0
+172	344.0
+174	348.0
+175	350.0
+176	352.0
+177	177.0
+178	178.0
+179	358.0
+18	36.0
+180	180.0
+181	181.0
+183	183.0
+186	186.0
+187	561.0
+189	189.0
+19	19.0
+190	190.0
+191	382.0
+192	192.0
+193	579.0
+194	194.0
+195	390.0
+196	196.0
+197	394.0
+199	597.0
+2	2.0
+20	20.0
+200	400.0
+201	201.0
+202	202.0
+203	406.0
+205	410.0
+207	414.0
+208	624.0
+209	418.0
+213	426.0
+214	214.0
+216	432.0
+217	434.0
+218	218.0
+219	438.0
+221	442.0
+222	222.0
+223	446.0
+224	448.0
+226	226.0
+228	228.0
+229	458.0
+230	1150.0
+233	466.0
+235	235.0
+237	474.0
+238	476.0
+239	478.0
+24	48.0
+241	241.0
+242	484.0
+244	244.0
+247	247.0
+248	248.0
+249	249.0
+252	252.0
+255	510.0
+256	512.0
+257	257.0
+258	258.0
+26	52.0
+260	260.0
+262	262.0
+263	263.0
+265	530.0
+266	266.0
+27	27.0
+272	544.0
+273	819.0
+274	274.0
+275	275.0
+277	1108.0
+278	556.0
+28	28.0
+280	560.0
+281	562.0
+282	564.0
+283	283.0
+284	284.0
+285	285.0
+286	286.0
+287	287.0
+288	576.0
+289	289.0
+291	291.0
+292	292.0
+296	296.0
+298	894.0
+30	30.0
+302	302.0
+305	305.0
+306	306.0
+307	614.0
+308	308.0
+309	618.0
+310	310.0
+311	933.0
+315	315.0
+316	948.0
+317	634.0
+318	954.0
+321	642.0
+322	644.0
+323	323.0
+325	650.0
+327	981.0
+33	33.0
+331	662.0
+332	332.0
+333	666.0
+335	335.0
+336	336.0
+338	338.0
+339	339.0
+34	34.0
+341	341.0
+342	684.0
+344	688.0
+345	345.0
+348	1740.0
+35	105.0
+351	351.0
+353	706.0
+356	356.0
+360	360.0
+362	362.0
+364	364.0
+365	365.0
+366	366.0
+367	734.0
+368	368.0
+369	1107.0
+37	74.0
+373	373.0
+374	374.0
+375	375.0
+377	377.0
+378	378.0
+379	379.0
+382	764.0
+384	1152.0
+386	386.0
+389	389.0
+392	392.0
+393	393.0
+394	394.0
+395	790.0
+396	1188.0
+397	794.0
+399	798.0
+4	4.0
+400	400.0
+401	2005.0
+402	402.0
+403	1209.0
+404	808.0
+406	1624.0
+407	407.0
+409	1227.0
+41	41.0
+411	411.0
+413	826.0
+414	828.0
+417	1251.0
+418	418.0
+419	419.0
+42	84.0
+421	421.0
+424	848.0
+427	427.0
+429	858.0
+43	43.0
+430	1290.0
+431	1293.0
+432	432.0
+435	435.0
+436	436.0
+437	437.0
+438	1314.0
+439	878.0
+44	44.0
+443	443.0
+444	444.0
+446	446.0
+448	448.0
+449	449.0
+452	452.0
+453	453.0
+454	1362.0
+455	455.0
+457	457.0
+458	916.0
+459	918.0
+460	460.0
+462	924.0
+463	926.0
+466	1398.0
+467	467.0
+468	1872.0
+469	2345.0
+47	47.0
+470	470.0
+472	472.0
+475	475.0
+477	477.0
+478	956.0
+479	479.0
+480	1440.0
+481	481.0
+482	482.0
+483	483.0
+484	484.0
+485	485.0
+487	487.0
+489	1956.0
+490	490.0
+491	491.0
+492	984.0
+493	493.0
+494	494.0
+495	495.0
+496	496.0
+497	497.0
+498	1494.0
+5	15.0
+51	102.0
+53	53.0
+54	54.0
+57	57.0
+58	116.0
+64	64.0
+65	65.0
+66	66.0
+67	134.0
+69	69.0
+70	210.0
+72	144.0
+74	74.0
+76	152.0
+77	77.0
+78	78.0
+8	8.0
+80	80.0
+82	82.0
+83	166.0
+84	168.0
+85	85.0
+86	86.0
+87	87.0
+9	9.0
+90	270.0
+92	92.0
+95	190.0
+96	96.0
+97	194.0
+98	196.0
+PREHOOK: query: select src.key, sum(src.key) FROM src JOIN src1 ON src.key = src1.key group by src.key, src.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: select src.key, sum(src.key) FROM src JOIN src1 ON src.key = src1.key group by src.key, src.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+128	384.0
+146	292.0
+150	150.0
+213	426.0
+224	448.0
+238	476.0
+255	510.0
+273	819.0
+278	556.0
+311	933.0
+369	1107.0
+401	2005.0
+406	1624.0
+66	66.0
+98	196.0
+PREHOOK: query: select src.key, src.value FROM src JOIN src1 ON src.key = src1.key order by src.key, src.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: select src.key, src.value FROM src JOIN src1 ON src.key = src1.key order by src.key, src.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+128	val_128
+128	val_128
+128	val_128
+146	val_146
+146	val_146
+150	val_150
+213	val_213
+213	val_213
+224	val_224
+224	val_224
+238	val_238
+238	val_238
+255	val_255
+255	val_255
+273	val_273
+273	val_273
+273	val_273
+278	val_278
+278	val_278
+311	val_311
+311	val_311
+311	val_311
+369	val_369
+369	val_369
+369	val_369
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+66	val_66
+98	val_98
+98	val_98
+PREHOOK: query: from (select key, value from src group by key, value) s select s.key group by s.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: from (select key, value from src group by key, value) s select s.key group by s.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0
+10
+100
+103
+104
+105
+11
+111
+113
+114
+116
+118
+119
+12
+120
+125
+126
+128
+129
+131
+133
+134
+136
+137
+138
+143
+145
+146
+149
+15
+150
+152
+153
+155
+156
+157
+158
+160
+162
+163
+164
+165
+166
+167
+168
+169
+17
+170
+172
+174
+175
+176
+177
+178
+179
+18
+180
+181
+183
+186
+187
+189
+19
+190
+191
+192
+193
+194
+195
+196
+197
+199
+2
+20
+200
+201
+202
+203
+205
+207
+208
+209
+213
+214
+216
+217
+218
+219
+221
+222
+223
+224
+226
+228
+229
+230
+233
+235
+237
+238
+239
+24
+241
+242
+244
+247
+248
+249
+252
+255
+256
+257
+258
+26
+260
+262
+263
+265
+266
+27
+272
+273
+274
+275
+277
+278
+28
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+291
+292
+296
+298
+30
+302
+305
+306
+307
+308
+309
+310
+311
+315
+316
+317
+318
+321
+322
+323
+325
+327
+33
+331
+332
+333
+335
+336
+338
+339
+34
+341
+342
+344
+345
+348
+35
+351
+353
+356
+360
+362
+364
+365
+366
+367
+368
+369
+37
+373
+374
+375
+377
+378
+379
+382
+384
+386
+389
+392
+393
+394
+395
+396
+397
+399
+4
+400
+401
+402
+403
+404
+406
+407
+409
+41
+411
+413
+414
+417
+418
+419
+42
+421
+424
+427
+429
+43
+430
+431
+432
+435
+436
+437
+438
+439
+44
+443
+444
+446
+448
+449
+452
+453
+454
+455
+457
+458
+459
+460
+462
+463
+466
+467
+468
+469
+47
+470
+472
+475
+477
+478
+479
+480
+481
+482
+483
+484
+485
+487
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+5
+51
+53
+54
+57
+58
+64
+65
+66
+67
+69
+70
+72
+74
+76
+77
+78
+8
+80
+82
+83
+84
+85
+86
+87
+9
+90
+92
+95
+96
+97
+98
+PREHOOK: query: select key, count(distinct value) from (select key, value from src group by key, value) t group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, count(distinct value) from (select key, value from src group by key, value) t group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	1
+10	1
+100	1
+103	1
+104	1
+105	1
+11	1
+111	1
+113	1
+114	1
+116	1
+118	1
+119	1
+12	1
+120	1
+125	1
+126	1
+128	1
+129	1
+131	1
+133	1
+134	1
+136	1
+137	1
+138	1
+143	1
+145	1
+146	1
+149	1
+15	1
+150	1
+152	1
+153	1
+155	1
+156	1
+157	1
+158	1
+160	1
+162	1
+163	1
+164	1
+165	1
+166	1
+167	1
+168	1
+169	1
+17	1
+170	1
+172	1
+174	1
+175	1
+176	1
+177	1
+178	1
+179	1
+18	1
+180	1
+181	1
+183	1
+186	1
+187	1
+189	1
+19	1
+190	1
+191	1
+192	1
+193	1
+194	1
+195	1
+196	1
+197	1
+199	1
+2	1
+20	1
+200	1
+201	1
+202	1
+203	1
+205	1
+207	1
+208	1
+209	1
+213	1
+214	1
+216	1
+217	1
+218	1
+219	1
+221	1
+222	1
+223	1
+224	1
+226	1
+228	1
+229	1
+230	1
+233	1
+235	1
+237	1
+238	1
+239	1
+24	1
+241	1
+242	1
+244	1
+247	1
+248	1
+249	1
+252	1
+255	1
+256	1
+257	1
+258	1
+26	1
+260	1
+262	1
+263	1
+265	1
+266	1
+27	1
+272	1
+273	1
+274	1
+275	1
+277	1
+278	1
+28	1
+280	1
+281	1
+282	1
+283	1
+284	1
+285	1
+286	1
+287	1
+288	1
+289	1
+291	1
+292	1
+296	1
+298	1
+30	1
+302	1
+305	1
+306	1
+307	1
+308	1
+309	1
+310	1
+311	1
+315	1
+316	1
+317	1
+318	1
+321	1
+322	1
+323	1
+325	1
+327	1
+33	1
+331	1
+332	1
+333	1
+335	1
+336	1
+338	1
+339	1
+34	1
+341	1
+342	1
+344	1
+345	1
+348	1
+35	1
+351	1
+353	1
+356	1
+360	1
+362	1
+364	1
+365	1
+366	1
+367	1
+368	1
+369	1
+37	1
+373	1
+374	1
+375	1
+377	1
+378	1
+379	1
+382	1
+384	1
+386	1
+389	1
+392	1
+393	1
+394	1
+395	1
+396	1
+397	1
+399	1
+4	1
+400	1
+401	1
+402	1
+403	1
+404	1
+406	1
+407	1
+409	1
+41	1
+411	1
+413	1
+414	1
+417	1
+418	1
+419	1
+42	1
+421	1
+424	1
+427	1
+429	1
+43	1
+430	1
+431	1
+432	1
+435	1
+436	1
+437	1
+438	1
+439	1
+44	1
+443	1
+444	1
+446	1
+448	1
+449	1
+452	1
+453	1
+454	1
+455	1
+457	1
+458	1
+459	1
+460	1
+462	1
+463	1
+466	1
+467	1
+468	1
+469	1
+47	1
+470	1
+472	1
+475	1
+477	1
+478	1
+479	1
+480	1
+481	1
+482	1
+483	1
+484	1
+485	1
+487	1
+489	1
+490	1
+491	1
+492	1
+493	1
+494	1
+495	1
+496	1
+497	1
+498	1
+5	1
+51	1
+53	1
+54	1
+57	1
+58	1
+64	1
+65	1
+66	1
+67	1
+69	1
+70	1
+72	1
+74	1
+76	1
+77	1
+78	1
+8	1
+80	1
+82	1
+83	1
+84	1
+85	1
+86	1
+87	1
+9	1
+90	1
+92	1
+95	1
+96	1
+97	1
+98	1
+PREHOOK: query: -- RS-RS-GBY
+explain select key, sum(key) from (select * from src distribute by key sort by key, value) Q1 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- RS-RS-GBY
+explain select key, sum(key) from (select * from src distribute by key sort by key, value) Q1 group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: sum(_col0)
+                  keys: _col0 (type: string)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select key, sum(key), lower(value) from (select * from src order by key) Q1 group by key, lower(value)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, sum(key), lower(value) from (select * from src order by key) Q1 group by key, lower(value)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), lower(_col1) (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), lower(_col1) (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), lower(VALUE._col0) (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: sum(_col0)
+                  keys: _col0 (type: string), _col1 (type: string)
+                  mode: complete
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double), _col1 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select key, sum(key), (X + 1) from (select key, (value + 1) as X from src order by key) Q1 group by key, (X + 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, sum(key), (X + 1) from (select key, (value + 1) as X from src order by key) Q1 group by key, (X + 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), (UDFToDouble(value) + 1.0) (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), (_col1 + 1.0) (type: double)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), (_col1 + 1.0) (type: double)
+                      Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), (VALUE._col0 + 1.0) (type: double)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: sum(_col0)
+                  keys: _col0 (type: string), _col1 (type: double)
+                  mode: complete
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double), _col1 (type: double)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 500 Data size: 51500 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- RS-GBY-RS
+explain select key, sum(key) as value from src group by key order by key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- RS-GBY-RS
+explain select key, sum(key) as value from src group by key order by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(KEY._col0)
+                keys: KEY._col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- RS-JOIN-RS-GBY
+explain select src.key, sum(src.key) FROM src JOIN src1 ON src.key = src1.key group by src.key, src.value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- RS-JOIN-RS-GBY
+explain select src.key, sum(src.key) FROM src JOIN src1 ON src.key = src1.key group by src.key, src.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                  Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(KEY._col0)
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 52 Data size: 9672 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string), _col2 (type: double)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 52 Data size: 4940 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 52 Data size: 4940 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- RS-JOIN-RS
+explain select src.key, src.value FROM src JOIN src1 ON src.key = src1.key order by src.key, src.value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- RS-JOIN-RS
+explain select src.key, src.value FROM src JOIN src1 ON src.key = src1.key order by src.key, src.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src1
+                  Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 60 Data size: 10680 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- RS-GBY-RS-GBY
+explain from (select key, value from src group by key, value) s select s.key group by s.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- RS-GBY-RS-GBY
+explain from (select key, value from src group by key, value) s select s.key group by s.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string), value (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: _col0 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 205 Data size: 17835 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 205 Data size: 17835 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select key, count(distinct value) from (select key, value from src group by key, value) t group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, count(distinct value) from (select key, value from src group by key, value) t group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string), value (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  keys: _col0 (type: string), _col1 (type: string)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: count(_col1)
+                    keys: _col0 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key, sum(key) from (select * from src distribute by key sort by key, value) Q1 group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, sum(key) from (select * from src distribute by key sort by key, value) Q1 group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0.0
+10	10.0
+100	200.0
+103	206.0
+104	208.0
+105	105.0
+11	11.0
+111	111.0
+113	226.0
+114	114.0
+116	116.0
+118	236.0
+119	357.0
+12	24.0
+120	240.0
+125	250.0
+126	126.0
+128	384.0
+129	258.0
+131	131.0
+133	133.0
+134	268.0
+136	136.0
+137	274.0
+138	552.0
+143	143.0
+145	145.0
+146	292.0
+149	298.0
+15	30.0
+150	150.0
+152	304.0
+153	153.0
+155	155.0
+156	156.0
+157	157.0
+158	158.0
+160	160.0
+162	162.0
+163	163.0
+164	328.0
+165	330.0
+166	166.0
+167	501.0
+168	168.0
+169	676.0
+17	17.0
+170	170.0
+172	344.0
+174	348.0
+175	350.0
+176	352.0
+177	177.0
+178	178.0
+179	358.0
+18	36.0
+180	180.0
+181	181.0
+183	183.0
+186	186.0
+187	561.0
+189	189.0
+19	19.0
+190	190.0
+191	382.0
+192	192.0
+193	579.0
+194	194.0
+195	390.0
+196	196.0
+197	394.0
+199	597.0
+2	2.0
+20	20.0
+200	400.0
+201	201.0
+202	202.0
+203	406.0
+205	410.0
+207	414.0
+208	624.0
+209	418.0
+213	426.0
+214	214.0
+216	432.0
+217	434.0
+218	218.0
+219	438.0
+221	442.0
+222	222.0
+223	446.0
+224	448.0
+226	226.0
+228	228.0
+229	458.0
+230	1150.0
+233	466.0
+235	235.0
+237	474.0
+238	476.0
+239	478.0
+24	48.0
+241	241.0
+242	484.0
+244	244.0
+247	247.0
+248	248.0
+249	249.0
+252	252.0
+255	510.0
+256	512.0
+257	257.0
+258	258.0
+26	52.0
+260	260.0
+262	262.0
+263	263.0
+265	530.0
+266	266.0
+27	27.0
+272	544.0
+273	819.0
+274	274.0
+275	275.0
+277	1108.0
+278	556.0
+28	28.0
+280	560.0
+281	562.0
+282	564.0
+283	283.0
+284	284.0
+285	285.0
+286	286.0
+287	287.0
+288	576.0
+289	289.0
+291	291.0
+292	292.0
+296	296.0
+298	894.0
+30	30.0
+302	302.0
+305	305.0
+306	306.0
+307	614.0
+308	308.0
+309	618.0
+310	310.0
+311	933.0
+315	315.0
+316	948.0
+317	634.0
+318	954.0
+321	642.0
+322	644.0
+323	323.0
+325	650.0
+327	981.0
+33	33.0
+331	662.0
+332	332.0
+333	666.0
+335	335.0
+336	336.0
+338	338.0
+339	339.0
+34	34.0
+341	341.0
+342	684.0
+344	688.0
+345	345.0
+348	1740.0
+35	105.0
+351	351.0
+353	706.0
+356	356.0
+360	360.0
+362	362.0
+364	364.0
+365	365.0
+366	366.0
+367	734.0
+368	368.0
+369	1107.0
+37	74.0
+373	373.0
+374	374.0
+375	375.0
+377	377.0
+378	378.0
+379	379.0
+382	764.0
+384	1152.0
+386	386.0
+389	389.0
+392	392.0
+393	393.0
+394	394.0
+395	790.0
+396	1188.0
+397	794.0
+399	798.0
+4	4.0
+400	400.0
+401	2005.0
+402	402.0
+403	1209.0
+404	808.0
+406	1624.0
+407	407.0
+409	1227.0
+41	41.0
+411	411.0
+413	826.0
+414	828.0
+417	1251.0
+418	418.0
+419	419.0
+42	84.0
+421	421.0
+424	848.0
+427	427.0
+429	858.0
+43	43.0
+430	1290.0
+431	1293.0
+432	432.0
+435	435.0
+436	436.0
+437	437.0
+438	1314.0
+439	878.0
+44	44.0
+443	443.0
+444	444.0
+446	446.0
+448	448.0
+449	449.0
+452	452.0
+453	453.0
+454	1362.0
+455	455.0
+457	457.0
+458	916.0
+459	918.0
+460	460.0
+462	924.0
+463	926.0
+466	1398.0
+467	467.0
+468	1872.0
+469	2345.0
+47	47.0
+470	470.0
+472	472.0
+475	475.0
+477	477.0
+478	956.0
+479	479.0
+480	1440.0
+481	481.0
+482	482.0
+483	483.0
+484	484.0
+485	485.0
+487	487.0
+489	1956.0
+490	490.0
+491	491.0
+492	984.0
+493	493.0
+494	494.0
+495	495.0
+496	496.0
+497	497.0
+498	1494.0
+5	15.0
+51	102.0
+53	53.0
+54	54.0
+57	57.0
+58	116.0
+64	64.0
+65	65.0
+66	66.0
+67	134.0
+69	69.0
+70	210.0
+72	144.0
+74	74.0
+76	152.0
+77	77.0
+78	78.0
+8	8.0
+80	80.0
+82	82.0
+83	166.0
+84	168.0
+85	85.0
+86	86.0
+87	87.0
+9	9.0
+90	270.0
+92	92.0
+95	190.0
+96	96.0
+97	194.0
+98	196.0
+PREHOOK: query: select key, sum(key), lower(value) from (select * from src order by key) Q1 group by key, lower(value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, sum(key), lower(value) from (select * from src order by key) Q1 group by key, lower(value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	0.0	val_0
+10	10.0	val_10
+100	200.0	val_100
+103	206.0	val_103
+104	208.0	val_104
+105	105.0	val_105
+11	11.0	val_11
+111	111.0	val_111
+113	226.0	val_113
+114	114.0	val_114
+116	116.0	val_116
+118	236.0	val_118
+119	357.0	val_119
+12	24.0	val_12
+120	240.0	val_120
+125	250.0	val_125
+126	126.0	val_126
+128	384.0	val_128
+129	258.0	val_129
+131	131.0	val_131
+133	133.0	val_133
+134	268.0	val_134
+136	136.0	val_136
+137	274.0	val_137
+138	552.0	val_138
+143	143.0	val_143
+145	145.0	val_145
+146	292.0	val_146
+149	298.0	val_149
+15	30.0	val_15
+150	150.0	val_150
+152	304.0	val_152
+153	153.0	val_153
+155	155.0	val_155
+156	156.0	val_156
+157	157.0	val_157
+158	158.0	val_158
+160	160.0	val_160
+162	162.0	val_162
+163	163.0	val_163
+164	328.0	val_164
+165	330.0	val_165
+166	166.0	val_166
+167	501.0	val_167
+168	168.0	val_168
+169	676.0	val_169
+17	17.0	val_17
+170	170.0	val_170
+172	344.0	val_172
+174	348.0	val_174
+175	350.0	val_175
+176	352.0	val_176
+177	177.0	val_177
+178	178.0	val_178
+179	358.0	val_179
+18	36.0	val_18
+180	180.0	val_180
+181	181.0	val_181
+183	183.0	val_183
+186	186.0	val_186
+187	561.0	val_187
+189	189.0	val_189
+19	19.0	val_19
+190	190.0	val_190
+191	382.0	val_191
+192	192.0	val_192
+193	579.0	val_193
+194	194.0	val_194
+195	390.0	val_195
+196	196.0	val_196
+197	394.0	val_197
+199	597.0	val_199
+2	2.0	val_2
+20	20.0	val_20
+200	400.0	val_200
+201	201.0	val_201
+202	202.0	val_202
+203	406.0	val_203
+205	410.0	val_205
+207	414.0	val_207
+208	624.0	val_208
+209	418.0	val_209
+213	426.0	val_213
+214	214.0	val_214
+216	432.0	val_216
+217	434.0	val_217
+218	218.0	val_218
+219	438.0	val_219
+221	442.0	val_221
+222	222.0	val_222
+223	446.0	val_223
+224	448.0	val_224
+226	226.0	val_226
+228	228.0	val_228
+229	458.0	val_229
+230	1150.0	val_230
+233	466.0	val_233
+235	235.0	val_235
+237	474.0	val_237
+238	476.0	val_238
+239	478.0	val_239
+24	48.0	val_24
+241	241.0	val_241
+242	484.0	val_242
+244	244.0	val_244
+247	247.0	val_247
+248	248.0	val_248
+249	249.0	val_249
+252	252.0	val_252
+255	510.0	val_255
+256	512.0	val_256
+257	257.0	val_257
+258	258.0	val_258
+26	52.0	val_26
+260	260.0	val_260
+262	262.0	val_262
+263	263.0	val_263
+265	530.0	val_265
+266	266.0	val_266
+27	27.0	val_27
+272	544.0	val_272
+273	819.0	val_273
+274	274.0	val_274
+275	275.0	val_275
+277	1108.0	val_277
+278	556.0	val_278
+28	28.0	val_28
+280	560.0	val_280
+281	562.0	val_281
+282	564.0	val_282
+283	283.0	val_283
+284	284.0	val_284
+285	285.0	val_285
+286	286.0	val_286
+287	287.0	val_287
+288	576.0	val_288
+289	289.0	val_289
+291	291.0	val_291
+292	292.0	val_292
+296	296.0	val_296
+298	894.0	val_298
+30	30.0	val_30
+302	302.0	val_302
+305	305.0	val_305
+306	306.0	val_306
+307	614.0	val_307
+308	308.0	val_308
+309	618.0	val_309
+310	310.0	val_310
+311	933.0	val_311
+315	315.0	val_315
+316	948.0	val_316
+317	634.0	val_317
+318	954.0	val_318
+321	642.0	val_321
+322	644.0	val_322
+323	323.0	val_323
+325	650.0	val_325
+327	981.0	val_327
+33	33.0	val_33
+331	662.0	val_331
+332	332.0	val_332
+333	666.0	val_333
+335	335.0	val_335
+336	336.0	val_336
+338	338.0	val_338
+339	339.0	val_339
+34	34.0	val_34
+341	341.0	val_341
+342	684.0	val_342
+344	688.0	val_344
+345	345.0	val_345
+348	1740.0	val_348
+35	105.0	val_35
+351	351.0	val_351
+353	706.0	val_353
+356	356.0	val_356
+360	360.0	val_360
+362	362.0	val_362
+364	364.0	val_364
+365	365.0	val_365
+366	366.0	val_366
+367	734.0	val_367
+368	368.0	val_368
+369	1107.0	val_369
+37	74.0	val_37
+373	373.0	val_373
+374	374.0	val_374
+375	375.0	val_375
+377	377.0	val_377
+378	378.0	val_378
+379	379.0	val_379
+382	764.0	val_382
+384	1152.0	val_384
+386	386.0	val_386
+389	389.0	val_389
+392	392.0	val_392
+393	393.0	val_393
+394	394.0	val_394
+395	790.0	val_395
+396	1188.0	val_396
+397	794.0	val_397
+399	798.0	val_399
+4	4.0	val_4
+400	400.0	val_400
+401	2005.0	val_401
+4

<TRUNCATED>

[09/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/skewjoinopt15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/skewjoinopt15.q.out b/ql/src/test/results/clientpositive/llap/skewjoinopt15.q.out
new file mode 100644
index 0000000..2c255ed
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/skewjoinopt15.q.out
@@ -0,0 +1,500 @@
+PREHOOK: query: CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tmpT1
+POSTHOOK: query: CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmpT1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tmpt1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tmpt1
+PREHOOK: query: -- testing skew on other data types - int
+CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T1
+POSTHOOK: query: -- testing skew on other data types - int
+CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2))
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmpt1
+PREHOOK: Output: default@t1
+POSTHOOK: query: INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmpt1
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key EXPRESSION [(tmpt1)tmpt1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t1.val SIMPLE [(tmpt1)tmpt1.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE tmpT2(key STRING, val STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tmpT2
+POSTHOOK: query: CREATE TABLE tmpT2(key STRING, val STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tmpT2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tmpt2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tmpt2
+PREHOOK: query: CREATE TABLE T2(key INT, val STRING) SKEWED BY (key) ON ((3))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T2
+POSTHOOK: query: CREATE TABLE T2(key INT, val STRING) SKEWED BY (key) ON ((3))
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T2
+PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tmpt2
+PREHOOK: Output: default@t2
+POSTHOOK: query: INSERT OVERWRITE TABLE T2 SELECT key, val FROM tmpT2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tmpt2
+POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key EXPRESSION [(tmpt2)tmpt2.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t2.val SIMPLE [(tmpt2)tmpt2.FieldSchema(name:val, type:string, comment:null), ]
+PREHOOK: query: -- The skewed key is a integer column.
+-- Otherwise this test is similar to skewjoinopt1.q
+-- Both the joined tables are skewed, and the joined column
+-- is an integer
+-- adding a order by at the end to make the results deterministic
+
+EXPLAIN
+SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The skewed key is a integer column.
+-- Otherwise this test is similar to skewjoinopt1.q
+-- Both the joined tables are skewed, and the joined column
+-- is an integer
+-- adding a order by at the end to make the results deterministic
+
+EXPLAIN
+SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), val (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), val (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+ORDER BY a.key, b.key, a.val, b.val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+ORDER BY a.key, b.key, a.val, b.val
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+2	12	2	22
+3	13	3	13
+8	18	8	18
+8	18	8	18
+8	28	8	18
+8	28	8	18
+PREHOOK: query: -- test outer joins also
+
+EXPLAIN
+SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- test outer joins also
+
+EXPLAIN
+SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int), val (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int), val (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+ORDER BY a.key, b.key, a.val, b.val
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+ORDER BY a.key, b.key, a.val, b.val
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+NULL	NULL	4	14
+NULL	NULL	5	15
+2	12	2	22
+3	13	3	13
+8	18	8	18
+8	18	8	18
+8	28	8	18
+8	28	8	18
+PREHOOK: query: -- an aggregation at the end should not change anything
+
+EXPLAIN
+SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- an aggregation at the end should not change anything
+
+EXPLAIN
+SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+6
+PREHOOK: query: EXPLAIN
+SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT count(1) FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+8

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/skiphf_aggr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/skiphf_aggr.q.out b/ql/src/test/results/clientpositive/llap/skiphf_aggr.q.out
new file mode 100644
index 0000000..aeb4b1b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/skiphf_aggr.q.out
@@ -0,0 +1,267 @@
+PREHOOK: query: DROP TABLE IF EXISTS skipHTbl
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS skipHTbl
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE skipHTbl (a int) 
+PARTITIONED BY (b int) 
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' 
+TBLPROPERTIES('skip.header.line.count'='1')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@skipHTbl
+POSTHOOK: query: CREATE TABLE skipHTbl (a int) 
+PARTITIONED BY (b int) 
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' 
+TBLPROPERTIES('skip.header.line.count'='1')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@skipHTbl
+PREHOOK: query: INSERT OVERWRITE TABLE skipHTbl PARTITION (b = 1) VALUES (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@skiphtbl@b=1
+POSTHOOK: query: INSERT OVERWRITE TABLE skipHTbl PARTITION (b = 1) VALUES (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@skiphtbl@b=1
+POSTHOOK: Lineage: skiphtbl PARTITION(b=1).a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: INSERT OVERWRITE TABLE skipHTbl PARTITION (b = 2) VALUES (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__2
+PREHOOK: Output: default@skiphtbl@b=2
+POSTHOOK: query: INSERT OVERWRITE TABLE skipHTbl PARTITION (b = 2) VALUES (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__2
+POSTHOOK: Output: default@skiphtbl@b=2
+POSTHOOK: Lineage: skiphtbl PARTITION(b=2).a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: SELECT * FROM skipHTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skiphtbl
+PREHOOK: Input: default@skiphtbl@b=1
+PREHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM skipHTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skiphtbl
+POSTHOOK: Input: default@skiphtbl@b=1
+POSTHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+2	1
+3	1
+4	1
+2	2
+3	2
+4	2
+PREHOOK: query: SELECT DISTINCT b FROM skipHTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skiphtbl
+PREHOOK: Input: default@skiphtbl@b=1
+PREHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT DISTINCT b FROM skipHTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skiphtbl
+POSTHOOK: Input: default@skiphtbl@b=1
+POSTHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+1
+2
+PREHOOK: query: SELECT MAX(b) FROM skipHTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skiphtbl
+PREHOOK: Input: default@skiphtbl@b=1
+PREHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT MAX(b) FROM skipHTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skiphtbl
+POSTHOOK: Input: default@skiphtbl@b=1
+POSTHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+2
+PREHOOK: query: SELECT DISTINCT a FROM skipHTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skiphtbl
+PREHOOK: Input: default@skiphtbl@b=1
+PREHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT DISTINCT a FROM skipHTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skiphtbl
+POSTHOOK: Input: default@skiphtbl@b=1
+POSTHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+2
+3
+4
+PREHOOK: query: INSERT OVERWRITE TABLE skipHTbl PARTITION (b = 1) VALUES (1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__3
+PREHOOK: Output: default@skiphtbl@b=1
+POSTHOOK: query: INSERT OVERWRITE TABLE skipHTbl PARTITION (b = 1) VALUES (1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__3
+POSTHOOK: Output: default@skiphtbl@b=1
+POSTHOOK: Lineage: skiphtbl PARTITION(b=1).a EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: INSERT OVERWRITE TABLE skipHTbl PARTITION (b = 2) VALUES (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__4
+PREHOOK: Output: default@skiphtbl@b=2
+POSTHOOK: query: INSERT OVERWRITE TABLE skipHTbl PARTITION (b = 2) VALUES (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__4
+POSTHOOK: Output: default@skiphtbl@b=2
+POSTHOOK: Lineage: skiphtbl PARTITION(b=2).a EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: SELECT DISTINCT b FROM skipHTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skiphtbl
+PREHOOK: Input: default@skiphtbl@b=1
+PREHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT DISTINCT b FROM skipHTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skiphtbl
+POSTHOOK: Input: default@skiphtbl@b=1
+POSTHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+2
+PREHOOK: query: SELECT MIN(b) FROM skipHTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skiphtbl
+PREHOOK: Input: default@skiphtbl@b=1
+PREHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT MIN(b) FROM skipHTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skiphtbl
+POSTHOOK: Input: default@skiphtbl@b=1
+POSTHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+2
+PREHOOK: query: SELECT DISTINCT a FROM skipHTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skiphtbl
+PREHOOK: Input: default@skiphtbl@b=1
+PREHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT DISTINCT a FROM skipHTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skiphtbl
+POSTHOOK: Input: default@skiphtbl@b=1
+POSTHOOK: Input: default@skiphtbl@b=2
+#### A masked pattern was here ####
+2
+3
+4
+PREHOOK: query: DROP TABLE IF EXISTS skipFTbl
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS skipFTbl
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE skipFTbl (a int) 
+PARTITIONED BY (b int) 
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' 
+TBLPROPERTIES('skip.footer.line.count'='1')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@skipFTbl
+POSTHOOK: query: CREATE TABLE skipFTbl (a int) 
+PARTITIONED BY (b int) 
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' 
+TBLPROPERTIES('skip.footer.line.count'='1')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@skipFTbl
+PREHOOK: query: INSERT OVERWRITE TABLE skipFTbl PARTITION (b = 1) VALUES (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__5
+PREHOOK: Output: default@skipftbl@b=1
+POSTHOOK: query: INSERT OVERWRITE TABLE skipFTbl PARTITION (b = 1) VALUES (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__5
+POSTHOOK: Output: default@skipftbl@b=1
+POSTHOOK: Lineage: skipftbl PARTITION(b=1).a EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: INSERT OVERWRITE TABLE skipFTbl PARTITION (b = 2) VALUES (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__6
+PREHOOK: Output: default@skipftbl@b=2
+POSTHOOK: query: INSERT OVERWRITE TABLE skipFTbl PARTITION (b = 2) VALUES (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__6
+POSTHOOK: Output: default@skipftbl@b=2
+POSTHOOK: Lineage: skipftbl PARTITION(b=2).a EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: SELECT * FROM skipFTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skipftbl
+PREHOOK: Input: default@skipftbl@b=1
+PREHOOK: Input: default@skipftbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM skipFTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skipftbl
+POSTHOOK: Input: default@skipftbl@b=1
+POSTHOOK: Input: default@skipftbl@b=2
+#### A masked pattern was here ####
+1	1
+2	1
+3	1
+1	2
+2	2
+3	2
+PREHOOK: query: SELECT DISTINCT b FROM skipFTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skipftbl
+PREHOOK: Input: default@skipftbl@b=1
+PREHOOK: Input: default@skipftbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT DISTINCT b FROM skipFTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skipftbl
+POSTHOOK: Input: default@skipftbl@b=1
+POSTHOOK: Input: default@skipftbl@b=2
+#### A masked pattern was here ####
+1
+2
+PREHOOK: query: SELECT MAX(b) FROM skipFTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skipftbl
+PREHOOK: Input: default@skipftbl@b=1
+PREHOOK: Input: default@skipftbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT MAX(b) FROM skipFTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skipftbl
+POSTHOOK: Input: default@skipftbl@b=1
+POSTHOOK: Input: default@skipftbl@b=2
+#### A masked pattern was here ####
+2
+PREHOOK: query: SELECT DISTINCT a FROM skipFTbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@skipftbl
+PREHOOK: Input: default@skipftbl@b=1
+PREHOOK: Input: default@skipftbl@b=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT DISTINCT a FROM skipFTbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@skipftbl
+POSTHOOK: Input: default@skipftbl@b=1
+POSTHOOK: Input: default@skipftbl@b=2
+#### A masked pattern was here ####
+1
+2
+3
+PREHOOK: query: DROP TABLE skipHTbl
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@skiphtbl
+PREHOOK: Output: default@skiphtbl
+POSTHOOK: query: DROP TABLE skipHTbl
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@skiphtbl
+POSTHOOK: Output: default@skiphtbl
+PREHOOK: query: DROP TABLE skipFTbl
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@skipftbl
+PREHOOK: Output: default@skipftbl
+POSTHOOK: query: DROP TABLE skipFTbl
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@skipftbl
+POSTHOOK: Output: default@skipftbl


[22/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/groupby_resolution.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/groupby_resolution.q.out b/ql/src/test/results/clientpositive/llap/groupby_resolution.q.out
new file mode 100644
index 0000000..53e52ee
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/groupby_resolution.q.out
@@ -0,0 +1,838 @@
+PREHOOK: query: explain select key, count(*) from src b group by b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, count(*) from src b group by b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count()
+                keys: KEY._col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select b.key, count(*) from src b group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select b.key, count(*) from src b group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count()
+                keys: KEY._col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select key, count(*) from src b group by b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, count(*) from src b group by b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: rand() (type: double)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count()
+                keys: KEY._col0 (type: string)
+                mode: partial1
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: final
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select b.key, count(*) from src b group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select b.key, count(*) from src b group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: rand() (type: double)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count()
+                keys: KEY._col0 (type: string)
+                mode: partial1
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: final
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select key, count(*) from src b group by b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, count(*) from src b group by b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select b.key, count(*) from src b group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select b.key, count(*) from src b group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select key, count(*) from src b group by b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, count(*) from src b group by b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: rand() (type: double)
+                        Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: partials
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: final
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select b.key, count(*) from src b group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select b.key, count(*) from src b group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: rand() (type: double)
+                        Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: partials
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: final
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- windowing after group by
+select key, count(*), rank() over(order by count(*))
+from src b
+where key < '12'
+group by b.key
+order by b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- windowing after group by
+select key, count(*), rank() over(order by count(*))
+from src b
+where key < '12'
+group by b.key
+order by b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	3	12
+10	1	1
+100	2	7
+103	2	7
+104	2	7
+105	1	1
+11	1	1
+111	1	1
+113	2	7
+114	1	1
+116	1	1
+118	2	7
+119	3	12
+PREHOOK: query: -- having after group by
+select key, count(*)
+from src b
+group by b.key
+having key < '12'
+order by b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- having after group by
+select key, count(*)
+from src b
+group by b.key
+having key < '12'
+order by b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	3
+10	1
+100	2
+103	2
+104	2
+105	1
+11	1
+111	1
+113	2
+114	1
+116	1
+118	2
+119	3
+PREHOOK: query: -- having and windowing
+select key, count(*), rank() over(order by count(*))
+from src b
+group by b.key
+having key < '12'
+order by b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- having and windowing
+select key, count(*), rank() over(order by count(*))
+from src b
+group by b.key
+having key < '12'
+order by b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	3	12
+10	1	1
+100	2	7
+103	2	7
+104	2	7
+105	1	1
+11	1	1
+111	1	1
+113	2	7
+114	1	1
+116	1	1
+118	2	7
+119	3	12
+PREHOOK: query: explain
+select key, count(*), rank() over(order by count(*))
+from src b
+group by b.key
+having key < '12'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key, count(*), rank() over(order by count(*))
+from src b
+group by b.key
+having key < '12'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key < '12') (type: boolean)
+                    Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count()
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: rand() (type: double)
+                        Statistics: Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: partials
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: final
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: 0 (type: int), _col1 (type: bigint)
+                  sort order: ++
+                  Map-reduce partition columns: 0 (type: int)
+                  Statistics: Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: string)
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: bigint)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE
+                PTF Operator
+                  Function definitions:
+                      Input definition
+                        input alias: ptf_0
+                        output shape: _col0: string, _col1: bigint
+                        type: WINDOWING
+                      Windowing table definition
+                        input alias: ptf_1
+                        name: windowingtablefunction
+                        order by: _col1 ASC NULLS FIRST
+                        partition by: 0
+                        raw input shape:
+                        window functions:
+                            window function definition
+                              alias: rank_window_0
+                              arguments: _col1
+                              name: rank
+                              window function: GenericUDAFRankEvaluator
+                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+                              isPivotResult: true
+                  Statistics: Num rows: 69 Data size: 6555 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: bigint), rank_window_0 (type: int)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 69 Data size: 6831 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 69 Data size: 6831 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- order by
+select key 
+from src t 
+where key < '12'
+group by t.key 
+order by t.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- order by
+select key 
+from src t 
+where key < '12'
+group by t.key 
+order by t.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0
+10
+100
+103
+104
+105
+11
+111
+113
+114
+116
+118
+119
+PREHOOK: query: -- cluster by
+EXPLAIN
+SELECT x.key, x.value as key FROM SRC x CLUSTER BY key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- cluster by
+EXPLAIN
+SELECT x.key, x.value as key FROM SRC x CLUSTER BY key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col1 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col1 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+


[12/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/pcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/pcs.q.out b/ql/src/test/results/clientpositive/llap/pcs.q.out
new file mode 100644
index 0000000..b3844ee
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/pcs.q.out
@@ -0,0 +1,1738 @@
+PREHOOK: query: drop table pcs_t1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table pcs_t1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table pcs_t2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table pcs_t2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table pcs_t1 (key int, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@pcs_t1
+POSTHOOK: query: create table pcs_t1 (key int, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@pcs_t1
+PREHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcs_t1@ds=2000-04-08
+POSTHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-08') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-08).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-08).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcs_t1@ds=2000-04-09
+POSTHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-09') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-09).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-09).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@pcs_t1@ds=2000-04-10
+POSTHOOK: query: insert overwrite table pcs_t1 partition (ds='2000-04-10') select * from src where key < 20 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-10
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-10).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: pcs_t1 PARTITION(ds=2000-04-10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: analyze table pcs_t1 partition(ds) compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+PREHOOK: Input: default@pcs_t1@ds=2000-04-10
+PREHOOK: Output: default@pcs_t1
+PREHOOK: Output: default@pcs_t1@ds=2000-04-08
+PREHOOK: Output: default@pcs_t1@ds=2000-04-09
+PREHOOK: Output: default@pcs_t1@ds=2000-04-10
+POSTHOOK: query: analyze table pcs_t1 partition(ds) compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
+POSTHOOK: Output: default@pcs_t1
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Output: default@pcs_t1@ds=2000-04-10
+PREHOOK: query: analyze table pcs_t1 partition(ds) compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+PREHOOK: Input: default@pcs_t1@ds=2000-04-10
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table pcs_t1 partition(ds) compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
+#### A masked pattern was here ####
+PREHOOK: query: explain extended select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: pcs_t1
+                  Statistics: Num rows: 40 Data size: 11120 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (struct(key,ds)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09')) (type: boolean)
+                    Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: int), value (type: string), ds (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
+                        null sort order: aaa
+                        sort order: +++
+                        Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: -1
+                        auto parallelism: false
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2000-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2000-04-08
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.pcs_t1
+                    numFiles 1
+                    numRows 20
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 160
+                    serialization.ddl struct pcs_t1 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 180
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.pcs_t1
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct pcs_t1 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.pcs_t1
+                  name: default.pcs_t1
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2000-04-09
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2000-04-09
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.pcs_t1
+                    numFiles 1
+                    numRows 20
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 160
+                    serialization.ddl struct pcs_t1 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 180
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.pcs_t1
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct pcs_t1 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.pcs_t1
+                  name: default.pcs_t1
+            Truncated Path -> Alias:
+              /pcs_t1/ds=2000-04-08 [pcs_t1]
+              /pcs_t1/ds=2000-04-09 [pcs_t1]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 20 Data size: 5560 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      properties:
+                        columns _col0,_col1,_col2
+                        columns.types int:string:string
+                        escape.delim \
+                        hive.serialization.extend.additional.nesting.levels true
+                        serialization.escape.crlf true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select key, value, ds from pcs_t1 where (ds='2000-04-08' and key=1) or (ds='2000-04-09' and key=2) order by key, value, ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+2	val_2	2000-04-09
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Processor Tree:
+        TableScan
+          alias: pcs_t1
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
+            Select Operator
+              expressions: ds (type: string)
+              outputColumnNames: _col0
+              ListSink
+
+PREHOOK: query: select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+2000-04-09
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Processor Tree:
+        TableScan
+          alias: pcs_t1
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (struct(ds,(key + 2))) IN (const struct('2000-04-08',3), const struct('2000-04-09',4)) (type: boolean)
+            Select Operator
+              expressions: ds (type: string)
+              outputColumnNames: _col0
+              ListSink
+
+PREHOOK: query: select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where struct(ds, key+2) in (struct('2000-04-08',3), struct('2000-04-09',4))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+2000-04-09
+PREHOOK: query: explain extended select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b  on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b  on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 40 Data size: 7520 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Reduce Output Operator
+                    key expressions: ds (type: string)
+                    null sort order: a
+                    sort order: +
+                    Map-reduce partition columns: ds (type: string)
+                    Statistics: Num rows: 40 Data size: 7520 Basic stats: COMPLETE Column stats: COMPLETE
+                    tag: 0
+                    value expressions: key (type: int)
+                    auto parallelism: true
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 40 Data size: 7520 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE
+                      Dynamic Partitioning Event Operator
+                        Target column: ds (string)
+                        Target Input: b
+                        Partition key expr: ds
+                        Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE
+                        Target Vertex: Map 3
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2000-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2000-04-08
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.pcs_t1
+                    numFiles 1
+                    numRows 20
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 160
+                    serialization.ddl struct pcs_t1 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 180
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.pcs_t1
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct pcs_t1 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.pcs_t1
+                  name: default.pcs_t1
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2000-04-09
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2000-04-09
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.pcs_t1
+                    numFiles 1
+                    numRows 20
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 160
+                    serialization.ddl struct pcs_t1 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 180
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.pcs_t1
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct pcs_t1 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.pcs_t1
+                  name: default.pcs_t1
+            Truncated Path -> Alias:
+              /pcs_t1/ds=2000-04-08 [a]
+              /pcs_t1/ds=2000-04-09 [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 40 Data size: 7520 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Reduce Output Operator
+                    key expressions: ds (type: string)
+                    null sort order: a
+                    sort order: +
+                    Map-reduce partition columns: ds (type: string)
+                    Statistics: Num rows: 40 Data size: 7520 Basic stats: COMPLETE Column stats: COMPLETE
+                    tag: 1
+                    value expressions: key (type: int)
+                    auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2000-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2000-04-08
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.pcs_t1
+                    numFiles 1
+                    numRows 20
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 160
+                    serialization.ddl struct pcs_t1 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 180
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.pcs_t1
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct pcs_t1 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.pcs_t1
+                  name: default.pcs_t1
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2000-04-09
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2000-04-09
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.pcs_t1
+                    numFiles 1
+                    numRows 20
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 160
+                    serialization.ddl struct pcs_t1 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 180
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.pcs_t1
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct pcs_t1 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.pcs_t1
+                  name: default.pcs_t1
+            Truncated Path -> Alias:
+              /pcs_t1/ds=2000-04-08 [b]
+              /pcs_t1/ds=2000-04-09 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 ds (type: string)
+                  1 ds (type: string)
+                outputColumnNames: _col0, _col2, _col6, _col8
+                Position of Big Table: 0
+                Statistics: Num rows: 800 Data size: 300800 Basic stats: COMPLETE Column stats: COMPLETE
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: (struct(_col2,_col0,_col8)) IN (const struct('2000-04-08',1,'2000-04-09'), const struct('2000-04-09',2,'2000-04-08')) (type: boolean)
+                  Statistics: Num rows: 1 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: _col2 (type: string), _col6 (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      GlobalTableId: 0
+#### A masked pattern was here ####
+                      NumFilesPerFileSink: 1
+                      Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          properties:
+                            columns _col0,_col1
+                            columns.types string:int
+                            escape.delim \
+                            hive.serialization.extend.additional.nesting.levels true
+                            serialization.escape.crlf true
+                            serialization.format 1
+                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      TotalFiles: 1
+                      GatherStats: false
+                      MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b  on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ MAPJOIN(pcs_t1) */ a.ds, b.key from pcs_t1 a join pcs_t1 b  on a.ds=b.ds where struct(a.ds, a.key, b.ds) in (struct('2000-04-08',1, '2000-04-09'), struct('2000-04-09',2, '2000-04-08'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Processor Tree:
+        TableScan
+          alias: pcs_t1
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (struct(ds,(key + key))) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
+            Select Operator
+              expressions: ds (type: string)
+              outputColumnNames: _col0
+              ListSink
+
+PREHOOK: query: select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where struct(ds, key+key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+PREHOOK: query: explain select lag(key) over (partition by key) as c1
+from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select lag(key) over (partition by key) as c1
+from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: pcs_t1
+                  Statistics: Num rows: 40 Data size: 7520 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
+                    Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 20 Data size: 9120 Basic stats: COMPLETE Column stats: COMPLETE
+                PTF Operator
+                  Function definitions:
+                      Input definition
+                        input alias: ptf_0
+                        output shape: _col0: int
+                        type: WINDOWING
+                      Windowing table definition
+                        input alias: ptf_1
+                        name: windowingtablefunction
+                        order by: _col0 ASC NULLS FIRST
+                        partition by: _col0
+                        raw input shape:
+                        window functions:
+                            window function definition
+                              alias: lag_window_0
+                              arguments: _col0
+                              name: lag
+                              window function: GenericUDAFLagEvaluator
+                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+                              isPivotResult: true
+                  Statistics: Num rows: 20 Data size: 9120 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: lag_window_0 (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 20 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 20 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select lag(key) over (partition by key) as c1
+from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select lag(key) over (partition by key) as c1
+from pcs_t1 where struct(ds, key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+#### A masked pattern was here ####
+NULL
+PREHOOK: query: EXPLAIN EXTENDED
+SELECT * FROM (
+  SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+  UNION ALL
+  SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+) A
+WHERE A.ds = '2008-04-08'
+SORT BY A.key, A.value, A.ds
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+SELECT * FROM (
+  SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+  UNION ALL
+  SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+) A
+WHERE A.ds = '2008-04-08'
+SORT BY A.key, A.value, A.ds
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 4 <- Union 2 (CONTAINS)
+        Reducer 3 <- Union 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: ((struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) and (struct(ds)) IN (const struct('2000-04-08'), const struct('2000-04-09')) and (ds = '2008-04-08')) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                      Select Operator
+                        expressions: _col0 (type: int), _col1 (type: string)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: string), '2008-04-08' (type: string)
+                          null sort order: aaa
+                          sort order: +++
+                          Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+                          tag: -1
+                          auto parallelism: true
+            Execution mode: llap
+            LLAP IO: unknown
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: ((struct(ds,key)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) and (struct(ds)) IN (const struct('2000-04-08'), const struct('2000-04-09')) and (ds = '2008-04-08')) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                      Select Operator
+                        expressions: _col0 (type: int), _col1 (type: string)
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: string), '2008-04-08' (type: string)
+                          null sort order: aaa
+                          sort order: +++
+                          Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+                          tag: -1
+                          auto parallelism: true
+            Execution mode: llap
+            LLAP IO: unknown
+        Reducer 3 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), '2008-04-08' (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      properties:
+                        columns _col0,_col1,_col2
+                        columns.types int:string:string
+                        escape.delim \
+                        hive.serialization.extend.additional.nesting.levels true
+                        serialization.escape.crlf true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT * FROM (
+  SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+  UNION ALL
+  SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+) A
+WHERE A.ds = '2008-04-08'
+SORT BY A.key, A.value, A.ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM (
+  SELECT X.* FROM pcs_t1 X WHERE struct(X.ds, X.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+  UNION ALL
+  SELECT Y.* FROM pcs_t1 Y WHERE struct(Y.ds, Y.key) in (struct('2000-04-08',1), struct('2000-04-09',2))
+) A
+WHERE A.ds = '2008-04-08'
+SORT BY A.key, A.value, A.ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+#### A masked pattern was here ####
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Processor Tree:
+        TableScan
+          alias: pcs_t1
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (const struct(10)) IN (const struct(10), const struct(11)) (type: boolean)
+            Select Operator
+              expressions: ds (type: string)
+              outputColumnNames: _col0
+              ListSink
+
+PREHOOK: query: select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where struct(case when ds='2000-04-08' then 10 else 20 end) in (struct(10),struct(11))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+#### A masked pattern was here ####
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+2000-04-08
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key, rand(100)) in (struct('2000-04-08',1,0.2), struct('2000-04-09',2,0.3))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds, key, rand(100)) in (struct('2000-04-08',1,0.2), struct('2000-04-09',2,0.3))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Processor Tree:
+        TableScan
+          alias: pcs_t1
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (struct(ds,key,rand(100))) IN (const struct('2000-04-08',1,0.2), const struct('2000-04-09',2,0.3)) (type: boolean)
+            Select Operator
+              expressions: ds (type: string)
+              outputColumnNames: _col0
+              ListSink
+
+PREHOOK: query: explain extended select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-10
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Processor Tree:
+        TableScan
+          alias: pcs_t1
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (struct(((ds = '2000-04-08') or (key = 2)),key)) IN (const struct(true,2), const struct(false,3)) (type: boolean)
+            Select Operator
+              expressions: ds (type: string)
+              outputColumnNames: _col0
+              ListSink
+
+PREHOOK: query: select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+PREHOOK: Input: default@pcs_t1@ds=2000-04-10
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
+#### A masked pattern was here ####
+2000-04-08
+2000-04-09
+2000-04-10
+PREHOOK: query: explain extended select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-08
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-09
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2000-04-10
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types int:string
+#### A masked pattern was here ####
+              name default.pcs_t1
+              numFiles 1
+              numRows 20
+              partition_columns ds
+              partition_columns.types string
+              rawDataSize 160
+              serialization.ddl struct pcs_t1 { i32 key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 180
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.pcs_t1
+                partition_columns ds
+                partition_columns.types string
+                serialization.ddl struct pcs_t1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.pcs_t1
+            name: default.pcs_t1
+      Processor Tree:
+        TableScan
+          alias: pcs_t1
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((key = 3) or ((struct(((ds = '2000-04-08') or (key = 2)),key)) IN (const struct(true,2), const struct(false,3)) and ((key + 5) > 0))) (type: boolean)
+            Select Operator
+              expressions: ds (type: string)
+              outputColumnNames: _col0
+              ListSink
+
+PREHOOK: query: select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@pcs_t1
+PREHOOK: Input: default@pcs_t1@ds=2000-04-08
+PREHOOK: Input: default@pcs_t1@ds=2000-04-09
+PREHOOK: Input: default@pcs_t1@ds=2000-04-10
+#### A masked pattern was here ####
+POSTHOOK: query: select ds from pcs_t1 where key = 3 or (struct(ds='2000-04-08' or key = 2, key) in (struct(true,2), struct(false,3)) and key+5 > 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@pcs_t1
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-08
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-09
+POSTHOOK: Input: default@pcs_t1@ds=2000-04-10
+#### A masked pattern was here ####
+2000-04-08
+2000-04-09
+2000-04-10


[16/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out b/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out
new file mode 100644
index 0000000..36c2604
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/offset_limit_ppd_optimizer.q.out
@@ -0,0 +1,1476 @@
+PREHOOK: query: explain
+select key,value from src order by key limit 10,20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key,value from src order by key limit 10,20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 0.3
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 10
+                  Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key,value from src order by key limit 10,20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value from src order by key limit 10,20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+105	val_105
+11	val_11
+111	val_111
+113	val_113
+113	val_113
+114	val_114
+116	val_116
+118	val_118
+118	val_118
+119	val_119
+119	val_119
+119	val_119
+12	val_12
+12	val_12
+120	val_120
+120	val_120
+125	val_125
+125	val_125
+126	val_126
+128	val_128
+PREHOOK: query: explain
+select key,value from src order by key desc limit 10,20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key,value from src order by key desc limit 10,20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: -
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 0.3
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 10
+                  Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 3560 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key,value from src order by key desc limit 10,20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value from src order by key desc limit 10,20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+90	val_90
+9	val_9
+87	val_87
+86	val_86
+85	val_85
+84	val_84
+84	val_84
+83	val_83
+83	val_83
+82	val_82
+80	val_80
+8	val_8
+78	val_78
+77	val_77
+76	val_76
+76	val_76
+74	val_74
+72	val_72
+72	val_72
+70	val_70
+PREHOOK: query: explain
+select value, sum(key + 1) as sum from src group by value order by value limit 10,20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select value, sum(key + 1) as sum from src group by value order by value limit 10,20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: sum(_col1)
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.3
+                        value expressions: _col1 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 10
+                  Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select value, sum(key + 1) as sum from src group by value order by value limit 10,20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value, sum(key + 1) as sum from src group by value order by value limit 10,20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_116	117.0
+val_118	238.0
+val_119	360.0
+val_12	26.0
+val_120	242.0
+val_125	252.0
+val_126	127.0
+val_128	387.0
+val_129	260.0
+val_131	132.0
+val_133	134.0
+val_134	270.0
+val_136	137.0
+val_137	276.0
+val_138	556.0
+val_143	144.0
+val_145	146.0
+val_146	294.0
+val_149	300.0
+val_15	32.0
+PREHOOK: query: -- deduped RS
+explain
+select value,avg(key + 1) from src group by value order by value limit 10,20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- deduped RS
+explain
+select value,avg(key + 1) from src group by value order by value limit 10,20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: avg(_col1)
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 214 Data size: 36594 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 214 Data size: 36594 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.3
+                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:double>)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: avg(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 10
+                  Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select value,avg(key + 1) from src group by value order by value limit 10,20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value,avg(key + 1) from src group by value order by value limit 10,20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_116	117.0
+val_118	119.0
+val_119	120.0
+val_12	13.0
+val_120	121.0
+val_125	126.0
+val_126	127.0
+val_128	129.0
+val_129	130.0
+val_131	132.0
+val_133	134.0
+val_134	135.0
+val_136	137.0
+val_137	138.0
+val_138	139.0
+val_143	144.0
+val_145	146.0
+val_146	147.0
+val_149	150.0
+val_15	16.0
+PREHOOK: query: -- distincts
+explain
+select distinct(cdouble) as dis from alltypesorc order by dis limit 10,20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- distincts
+explain
+select distinct(cdouble) as dis from alltypesorc order by dis limit 10,20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: cdouble (type: double)
+                    outputColumnNames: cdouble
+                    Statistics: Num rows: 12288 Data size: 73400 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: cdouble (type: double)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 4265 Data size: 25480 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: double)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: double)
+                        Statistics: Num rows: 4265 Data size: 25480 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.3
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: double)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 4265 Data size: 25480 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 10
+                  Statistics: Num rows: 20 Data size: 128 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 128 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select distinct(cdouble) as dis from alltypesorc order by dis limit 10,20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select distinct(cdouble) as dis from alltypesorc order by dis limit 10,20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-16309.0
+-16307.0
+-16306.0
+-16305.0
+-16300.0
+-16296.0
+-16280.0
+-16277.0
+-16274.0
+-16269.0
+-16243.0
+-16236.0
+-16227.0
+-16225.0
+-16221.0
+-16218.0
+-16217.0
+-16211.0
+-16208.0
+-16207.0
+PREHOOK: query: explain
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 10,20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 10,20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cdouble (type: double)
+                    outputColumnNames: ctinyint, cdouble
+                    Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: ctinyint (type: tinyint), cdouble (type: double)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: double)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: tinyint)
+                        Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count(_col1)
+                  keys: _col0 (type: tinyint)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 95 Data size: 1048 Basic stats: COMPLETE Column stats: COMPLETE
+                  Limit
+                    Number of rows: 20
+                    Offset of rows: 10
+                    Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 10,20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 10,20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-55	29
+-54	26
+-53	22
+-52	33
+-51	21
+-50	30
+-49	26
+-48	29
+-47	22
+-46	24
+-45	24
+-44	24
+-43	30
+-42	17
+-41	24
+-40	26
+-39	22
+-38	31
+-37	20
+-36	26
+PREHOOK: query: explain
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 10,20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 10,20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cdouble (type: double)
+                    outputColumnNames: ctinyint, cdouble
+                    Statistics: Num rows: 12288 Data size: 110096 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: ctinyint (type: tinyint), cdouble (type: double)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: double)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: tinyint)
+                        Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 55052 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count(_col1)
+                  keys: _col0 (type: tinyint)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 95 Data size: 1048 Basic stats: COMPLETE Column stats: COMPLETE
+                  Limit
+                    Number of rows: 20
+                    Offset of rows: 10
+                    Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 20 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 10,20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 10,20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-55	29
+-54	26
+-53	22
+-52	33
+-51	21
+-50	30
+-49	26
+-48	29
+-47	22
+-46	24
+-45	24
+-44	24
+-43	30
+-42	17
+-41	24
+-40	26
+-39	22
+-38	31
+-37	20
+-36	26
+PREHOOK: query: -- multi distinct
+explain
+select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 10,20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- multi distinct
+explain
+select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 10,20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 1779850 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
+                    outputColumnNames: ctinyint, cstring1, cstring2
+                    Statistics: Num rows: 12288 Data size: 1779850 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: count(DISTINCT cstring1), count(DISTINCT cstring2)
+                      keys: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Statistics: Num rows: 12288 Data size: 1976458 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: string), _col2 (type: string)
+                        sort order: +++
+                        Map-reduce partition columns: _col0 (type: tinyint)
+                        Statistics: Num rows: 12288 Data size: 1976458 Basic stats: COMPLETE Column stats: COMPLETE
+                        TopN Hash Memory Usage: 0.3
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0)
+                keys: KEY._col0 (type: tinyint)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 95 Data size: 1808 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 10
+                  Statistics: Num rows: 20 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 384 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 10,20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 10,20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-55	3	21
+-54	3	21
+-53	3	17
+-52	3	21
+-51	1012	1045
+-50	3	25
+-49	3	24
+-48	3	27
+-47	3	23
+-46	3	19
+-45	3	24
+-44	3	31
+-43	3	26
+-42	3	22
+-41	3	29
+-40	3	25
+-39	3	30
+-38	3	19
+-37	3	27
+-36	3	18
+PREHOOK: query: -- limit zero
+explain
+select key,value from src order by key limit 0,0
+PREHOOK: type: QUERY
+POSTHOOK: query: -- limit zero
+explain
+select key,value from src order by key limit 0,0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 0
+      Processor Tree:
+        TableScan
+          alias: src
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Limit
+              Number of rows: 0
+              ListSink
+
+PREHOOK: query: select key,value from src order by key limit 0,0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value from src order by key limit 0,0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+PREHOOK: query: -- 2MR (applied to last RS)
+explain
+select value, sum(key) as sum from src group by value order by sum limit 10,20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 2MR (applied to last RS)
+explain
+select value, sum(key) as sum from src group by value order by sum limit 10,20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: value (type: string), key (type: string)
+                    outputColumnNames: value, key
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: sum(key)
+                      keys: value (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col1 (type: double)
+                  sort order: +
+                  Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                  TopN Hash Memory Usage: 0.3
+                  value expressions: _col0 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: double)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 10
+                  Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select value, sum(key) as sum from src group by value order by sum limit 10,20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value, sum(key) as sum from src group by value order by sum limit 10,20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_20	20.0
+val_12	24.0
+val_27	27.0
+val_28	28.0
+val_30	30.0
+val_15	30.0
+val_33	33.0
+val_34	34.0
+val_18	36.0
+val_41	41.0
+val_43	43.0
+val_44	44.0
+val_47	47.0
+val_24	48.0
+val_26	52.0
+val_53	53.0
+val_54	54.0
+val_57	57.0
+val_64	64.0
+val_65	65.0
+PREHOOK: query: -- map aggregation disabled
+explain
+select value, sum(key) as sum from src group by value order by value limit 10,20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map aggregation disabled
+explain
+select value, sum(key) as sum from src group by value order by value limit 10,20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: value (type: string)
+                      sort order: +
+                      Map-reduce partition columns: value (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 0.3
+                      value expressions: key (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 214 Data size: 21186 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 10
+                  Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 20 Data size: 1980 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select value, sum(key) as sum from src group by value order by value limit 10,20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value, sum(key) as sum from src group by value order by value limit 10,20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_116	116.0
+val_118	236.0
+val_119	357.0
+val_12	24.0
+val_120	240.0
+val_125	250.0
+val_126	126.0
+val_128	384.0
+val_129	258.0
+val_131	131.0
+val_133	133.0
+val_134	268.0
+val_136	136.0
+val_137	274.0
+val_138	552.0
+val_143	143.0
+val_145	145.0
+val_146	292.0
+val_149	298.0
+val_15	30.0
+PREHOOK: query: -- flush for order-by
+explain
+select key,value,value,value,value,value,value,value,value from src order by key limit 30,70
+PREHOOK: type: QUERY
+POSTHOOK: query: -- flush for order-by
+explain
+select key,value,value,value,value,value,value,value,value from src order by key limit 30,70
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 407500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Statistics: Num rows: 500 Data size: 407500 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 2.0E-5
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 500 Data size: 407500 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 70
+                  Offset of rows: 30
+                  Statistics: Num rows: 70 Data size: 57050 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 70 Data size: 57050 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 70
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key,value,value,value,value,value,value,value,value from src order by key limit 30,70
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value,value,value,value,value,value,value,value from src order by key limit 30,70
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+128	val_128	val_128	val_128	val_128	val_128	val_128	val_128	val_128
+128	val_128	val_128	val_128	val_128	val_128	val_128	val_128	val_128
+129	val_129	val_129	val_129	val_129	val_129	val_129	val_129	val_129
+129	val_129	val_129	val_129	val_129	val_129	val_129	val_129	val_129
+131	val_131	val_131	val_131	val_131	val_131	val_131	val_131	val_131
+133	val_133	val_133	val_133	val_133	val_133	val_133	val_133	val_133
+134	val_134	val_134	val_134	val_134	val_134	val_134	val_134	val_134
+134	val_134	val_134	val_134	val_134	val_134	val_134	val_134	val_134
+136	val_136	val_136	val_136	val_136	val_136	val_136	val_136	val_136
+137	val_137	val_137	val_137	val_137	val_137	val_137	val_137	val_137
+137	val_137	val_137	val_137	val_137	val_137	val_137	val_137	val_137
+138	val_138	val_138	val_138	val_138	val_138	val_138	val_138	val_138
+138	val_138	val_138	val_138	val_138	val_138	val_138	val_138	val_138
+138	val_138	val_138	val_138	val_138	val_138	val_138	val_138	val_138
+138	val_138	val_138	val_138	val_138	val_138	val_138	val_138	val_138
+143	val_143	val_143	val_143	val_143	val_143	val_143	val_143	val_143
+145	val_145	val_145	val_145	val_145	val_145	val_145	val_145	val_145
+146	val_146	val_146	val_146	val_146	val_146	val_146	val_146	val_146
+146	val_146	val_146	val_146	val_146	val_146	val_146	val_146	val_146
+149	val_149	val_149	val_149	val_149	val_149	val_149	val_149	val_149
+149	val_149	val_149	val_149	val_149	val_149	val_149	val_149	val_149
+15	val_15	val_15	val_15	val_15	val_15	val_15	val_15	val_15
+15	val_15	val_15	val_15	val_15	val_15	val_15	val_15	val_15
+150	val_150	val_150	val_150	val_150	val_150	val_150	val_150	val_150
+152	val_152	val_152	val_152	val_152	val_152	val_152	val_152	val_152
+152	val_152	val_152	val_152	val_152	val_152	val_152	val_152	val_152
+153	val_153	val_153	val_153	val_153	val_153	val_153	val_153	val_153
+155	val_155	val_155	val_155	val_155	val_155	val_155	val_155	val_155
+156	val_156	val_156	val_156	val_156	val_156	val_156	val_156	val_156
+157	val_157	val_157	val_157	val_157	val_157	val_157	val_157	val_157
+158	val_158	val_158	val_158	val_158	val_158	val_158	val_158	val_158
+160	val_160	val_160	val_160	val_160	val_160	val_160	val_160	val_160
+162	val_162	val_162	val_162	val_162	val_162	val_162	val_162	val_162
+163	val_163	val_163	val_163	val_163	val_163	val_163	val_163	val_163
+164	val_164	val_164	val_164	val_164	val_164	val_164	val_164	val_164
+164	val_164	val_164	val_164	val_164	val_164	val_164	val_164	val_164
+165	val_165	val_165	val_165	val_165	val_165	val_165	val_165	val_165
+165	val_165	val_165	val_165	val_165	val_165	val_165	val_165	val_165
+166	val_166	val_166	val_166	val_166	val_166	val_166	val_166	val_166
+167	val_167	val_167	val_167	val_167	val_167	val_167	val_167	val_167
+167	val_167	val_167	val_167	val_167	val_167	val_167	val_167	val_167
+167	val_167	val_167	val_167	val_167	val_167	val_167	val_167	val_167
+168	val_168	val_168	val_168	val_168	val_168	val_168	val_168	val_168
+169	val_169	val_169	val_169	val_169	val_169	val_169	val_169	val_169
+169	val_169	val_169	val_169	val_169	val_169	val_169	val_169	val_169
+169	val_169	val_169	val_169	val_169	val_169	val_169	val_169	val_169
+169	val_169	val_169	val_169	val_169	val_169	val_169	val_169	val_169
+17	val_17	val_17	val_17	val_17	val_17	val_17	val_17	val_17
+170	val_170	val_170	val_170	val_170	val_170	val_170	val_170	val_170
+172	val_172	val_172	val_172	val_172	val_172	val_172	val_172	val_172
+172	val_172	val_172	val_172	val_172	val_172	val_172	val_172	val_172
+174	val_174	val_174	val_174	val_174	val_174	val_174	val_174	val_174
+174	val_174	val_174	val_174	val_174	val_174	val_174	val_174	val_174
+175	val_175	val_175	val_175	val_175	val_175	val_175	val_175	val_175
+175	val_175	val_175	val_175	val_175	val_175	val_175	val_175	val_175
+176	val_176	val_176	val_176	val_176	val_176	val_176	val_176	val_176
+176	val_176	val_176	val_176	val_176	val_176	val_176	val_176	val_176
+177	val_177	val_177	val_177	val_177	val_177	val_177	val_177	val_177
+178	val_178	val_178	val_178	val_178	val_178	val_178	val_178	val_178
+179	val_179	val_179	val_179	val_179	val_179	val_179	val_179	val_179
+179	val_179	val_179	val_179	val_179	val_179	val_179	val_179	val_179
+18	val_18	val_18	val_18	val_18	val_18	val_18	val_18	val_18
+18	val_18	val_18	val_18	val_18	val_18	val_18	val_18	val_18
+180	val_180	val_180	val_180	val_180	val_180	val_180	val_180	val_180
+181	val_181	val_181	val_181	val_181	val_181	val_181	val_181	val_181
+183	val_183	val_183	val_183	val_183	val_183	val_183	val_183	val_183
+186	val_186	val_186	val_186	val_186	val_186	val_186	val_186	val_186
+187	val_187	val_187	val_187	val_187	val_187	val_187	val_187	val_187
+187	val_187	val_187	val_187	val_187	val_187	val_187	val_187	val_187
+187	val_187	val_187	val_187	val_187	val_187	val_187	val_187	val_187
+PREHOOK: query: -- flush for group-by
+explain
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 30,70
+PREHOOK: type: QUERY
+POSTHOOK: query: -- flush for group-by
+explain
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 30,70
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: concat(key, value, value, value, value, value, value, value, value, value) (type: string), key (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 96000 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: double)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: double)
+                    sort order: +
+                    Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                    TopN Hash Memory Usage: 2.0E-5
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: double)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 70
+                  Offset of rows: 30
+                  Statistics: Num rows: 70 Data size: 560 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 70 Data size: 560 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 70
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 30,70
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 30,70
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+66.0
+69.0
+74.0
+74.0
+77.0
+78.0
+80.0
+82.0
+84.0
+85.0
+86.0
+87.0
+92.0
+96.0
+102.0
+105.0
+105.0
+111.0
+114.0
+116.0
+116.0
+126.0
+131.0
+133.0
+134.0
+136.0
+143.0
+144.0
+145.0
+150.0
+152.0
+153.0
+155.0
+156.0
+157.0
+158.0
+160.0
+162.0
+163.0
+166.0
+166.0
+168.0
+168.0
+170.0
+177.0
+178.0
+180.0
+181.0
+183.0
+186.0
+189.0
+190.0
+190.0
+192.0
+194.0
+194.0
+196.0
+196.0
+200.0
+201.0
+202.0
+206.0
+208.0
+210.0
+214.0
+218.0
+222.0
+226.0
+226.0
+228.0
+PREHOOK: query: -- subqueries
+explain
+select * from
+(select key, count(1) from src group by key order by key limit 10,20) subq
+join
+(select key, count(1) from src group by key limit 20,20) subq2
+on subq.key=subq2.key limit 3,5
+PREHOOK: type: QUERY
+POSTHOOK: query: -- subqueries
+explain
+select * from
+(select key, count(1) from src group by key order by key limit 10,20) subq
+join
+(select key, count(1) from src group by key limit 20,20) subq2
+on subq.key=subq2.key limit 3,5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
+        Reducer 5 <- Map 4 (SIMPLE_EDGE)
+        Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 2.0E-5
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      TopN Hash Memory Usage: 2.0E-5
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(1)
+                keys: KEY._col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 10
+                  Statistics: Num rows: 20 Data size: 1900 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: _col0 is not null (type: boolean)
+                    Statistics: Num rows: 20 Data size: 1900 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 20 Data size: 1900 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 20 Data size: 3800 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 5
+                  Offset of rows: 3
+                  Statistics: Num rows: 5 Data size: 950 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 950 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 5 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(1)
+                keys: KEY._col0 (type: string)
+                mode: complete
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 20
+                  Statistics: Num rows: 20 Data size: 1900 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 20 Data size: 1900 Basic stats: COMPLETE Column stats: COMPLETE
+                    TopN Hash Memory Usage: 2.0E-5
+                    value expressions: _col0 (type: string), _col1 (type: bigint)
+        Reducer 6 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), VALUE._col1 (type: bigint)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 20 Data size: 1900 Basic stats: COMPLETE Column stats: COMPLETE
+                Limit
+                  Number of rows: 20
+                  Offset of rows: 20
+                  Statistics: Num rows: 20 Data size: 1900 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: _col0 is not null (type: boolean)
+                    Statistics: Num rows: 20 Data size: 1900 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 20 Data size: 1900 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: bigint)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 5
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from
+(select key, count(1) from src group by key order by key limit 10,20) subq
+join
+(select key, count(1) from src group by key order by key limit 20,20) subq2
+on subq.key=subq2.key limit 3,5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select key, count(1) from src group by key order by key limit 10,20) subq
+join
+(select key, count(1) from src group by key order by key limit 20,20) subq2
+on subq.key=subq2.key limit 3,5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+137	2	137	2
+138	4	138	4
+143	1	143	1
+145	1	145	1
+146	2	146	2


[33/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
new file mode 100644
index 0000000..e999077
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
@@ -0,0 +1,1973 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl1
+PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl2
+PREHOOK: query: insert overwrite table tbl1
+select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: insert overwrite table tbl1
+select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl1
+POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tbl2
+select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: insert overwrite table tbl2
+select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl2
+POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+22
+PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from
+(
+  select key, count(*) from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1
+  group by key
+) subq2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from
+(
+  select key, count(*) from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1
+  group by key
+) subq2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    aggregations: count()
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from
+(
+  select key, count(*) from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1
+  group by key
+) subq2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(
+  select key, count(*) from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1
+  group by key
+) subq2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+6
+PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them.
+-- Each sub-query should be converted to a sort-merge join.
+explain
+select src1.key, src1.cnt1, src2.cnt1 from
+(
+  select key, count(*) as cnt1 from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1 group by key
+) src1
+join
+(
+  select key, count(*) as cnt1 from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq2 group by key
+) src2
+on src1.key = src2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them.
+-- Each sub-query should be converted to a sort-merge join.
+explain
+select src1.key, src1.cnt1, src2.cnt1 from
+(
+  select key, count(*) as cnt1 from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1 group by key
+) src1
+join
+(
+  select key, count(*) as cnt1 from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq2 group by key
+) src2
+on src1.key = src2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
+        Reducer 6 <- Map 5 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: bigint)
+            Execution mode: llap
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1, _col3
+                Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: bigint), _col3 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 6 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from
+(
+  select key, count(*) as cnt1 from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1 group by key
+) src1
+join
+(
+  select key, count(*) as cnt1 from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq2 group by key
+) src2
+on src1.key = src2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from
+(
+  select key, count(*) as cnt1 from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1 group by key
+) src1
+join
+(
+  select key, count(*) as cnt1 from 
+  (
+    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq2 group by key
+) src2
+on src1.key = src2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+0	9	9
+2	1	1
+4	1	1
+5	9	9
+8	1	1
+9	1	1
+PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should 
+-- be converted to a sort-merge join.
+explain
+select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should 
+-- be converted to a sort-merge join.
+explain
+select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should 
+-- be converted to a sort-merge join, although there is more than one level of sub-query
+explain
+select count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join tbl2 b
+  on subq2.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should 
+-- be converted to a sort-merge join, although there is more than one level of sub-query
+explain
+select count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join tbl2 b
+  on subq2.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join tbl2 b
+  on subq2.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join tbl2 b
+  on subq2.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query.
+-- The join should be converted to a sort-merge join
+explain
+select count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq3 
+  where key < 6
+  ) subq4
+  on subq2.key = subq4.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query.
+-- The join should be converted to a sort-merge join
+explain
+select count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq3 
+  where key < 6
+  ) subq4
+  on subq2.key = subq4.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq3 
+  where key < 6
+  ) subq4
+  on subq2.key = subq4.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq3 
+  where key < 6
+  ) subq4
+  on subq2.key = subq4.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key
+-- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one 
+-- item, but that is not part of the join key.
+explain
+select count(*) from 
+  (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
+    join
+  (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key
+-- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one 
+-- item, but that is not part of the join key.
+explain
+select count(*) from 
+  (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
+    join
+  (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 8) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 8) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from 
+  (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
+    join
+  (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from 
+  (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
+    join
+  (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side
+-- join should be performed
+explain
+select count(*) from 
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+    join
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side
+-- join should be performed
+explain
+select count(*) from 
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+    join
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key + 1) is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: (key + 1) (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key + 1) is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: (key + 1) (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from 
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+    join
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from 
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+    join
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+22
+PREHOOK: query: -- One of the tables is a sub-query and the other is not.
+-- It should be converted to a sort-merge join.
+explain
+select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- One of the tables is a sub-query and the other is not.
+-- It should be converted to a sort-merge join.
+explain
+select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
+-- It should be converted to to a sort-merge join
+explain
+select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on (subq1.key = subq2.key)
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+  on (subq1.key = subq3.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
+-- It should be converted to to a sort-merge join
+explain
+select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on (subq1.key = subq2.key)
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+  on (subq1.key = subq3.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                             Inner Join 0 to 2
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                          2 _col0 (type: int)
+                        Statistics: Num rows: 6 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+  on (subq1.key = subq3.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+  on (subq1.key = subq3.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+56
+PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that.
+-- The join should be converted to a sort-merge join
+explain
+select count(*) from (
+  select subq2.key as key, subq2.value as value1, b.value as value2 from
+  (
+    select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1
+    where key < 6
+  ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that.
+-- The join should be converted to a sort-merge join
+explain
+select count(*) from (
+  select subq2.key as key, subq2.value as value1, b.value as value2 from
+  (
+    select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1
+    where key < 6
+  ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Merge Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from (
+  select subq2.key as key, subq2.value as value1, b.value as value2 from
+  (
+    select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1
+    where key < 6
+  ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (
+  select subq2.key as key, subq2.value as value1, b.value as value2 from
+  (
+    select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1
+    where key < 6
+  ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: CREATE TABLE dest1(key int, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: CREATE TABLE dest2(key int, val1 string, val2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest2
+POSTHOOK: query: CREATE TABLE dest2(key int, val1 string, val2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest2
+PREHOOK: query: -- The join is followed by a multi-table insert. It should be converted to
+-- a sort-merge join
+explain
+from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+insert overwrite table dest1 select key, val1
+insert overwrite table dest2 select key, val1, val2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is followed by a multi-table insert. It should be converted to
+-- a sort-merge join
+explain
+from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+insert overwrite table dest1 select key, val1
+insert overwrite table dest2 select key, val1, val2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Merge Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      outputColumnNames: _col0, _col1, _col6
+                      Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), _col1 (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                          File Output Operator
+                            compressed: false
+                            Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                name: default.dest1
+                        File Output Operator
+                          compressed: false
+                          Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                          table:
+                              input format: org.apache.hadoop.mapred.TextInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                              name: default.dest2
+            Execution mode: llap
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+insert overwrite table dest1 select key, val1
+insert overwrite table dest2 select key, val1, val2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest2
+POSTHOOK: query: from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+insert overwrite table dest1 select key, val1
+insert overwrite table dest2 select key, val1, val2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest2
+POSTHOOK: Lineage: dest1.key SIMPLE [(tbl1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest1.value SIMPLE [(tbl1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2.key SIMPLE [(tbl1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest2.val1 SIMPLE [(tbl1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2.val2 SIMPLE [(tbl2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: select * from dest2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest2
+#### A masked pattern was here ####
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+0	val_0	val_0
+2	val_2	val_2
+4	val_4	val_4
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+5	val_5	val_5
+8	val_8	val_8
+9	val_9	val_9
+PREHOOK: query: DROP TABLE dest2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@dest2
+PREHOOK: Output: default@dest2
+POSTHOOK: query: DROP TABLE dest2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@dest2
+POSTHOOK: Output: default@dest2
+PREHOOK: query: CREATE TABLE dest2(key int, cnt int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest2
+POSTHOOK: query: CREATE TABLE dest2(key int, cnt int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest2
+PREHOOK: query: -- The join is followed by a multi-table insert, and one of the inserts involves a reducer.
+-- It should be converted to a sort-merge join
+explain
+from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+insert overwrite table dest1 select key, val1
+insert overwrite table dest2 select key, count(*) group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is followed by a multi-table insert, and one of the inserts involves a reducer.
+-- It should be converted to a sort-merge join
+explain
+from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+insert overwrite table dest1 select key, val1
+insert overwrite table dest2 select key, count(*) group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0
+  Stage-1 depends on stages: Stage-3
+  Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Merge Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.dest1
+                      Select Operator
+                        expressions: _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), UDFToInteger(_col1) (type: int)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest2
+
+  Stage: Stage-3
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest1
+
+  Stage: Stage-4
+    Stats-Aggr Operator
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest2
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+PREHOOK: query: from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+insert overwrite table dest1 select key, val1
+insert overwrite table dest2 select key, count(*) group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest2
+POSTHOOK: query: from (
+  select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+insert overwrite table dest1 select key, val1
+insert overwrite table dest2 select key, count(*) group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest2
+POSTHOOK: Lineage: dest1.key SIMPLE [(tbl1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest1.value SIMPLE [(tbl1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2.cnt EXPRESSION [(tbl1)a.null, (tbl2)b.null, ]
+POSTHOOK: Lineage: dest2.key SIMPLE [(tbl1)a.FieldSchema(name:key, type:int, comment:null), ]
+PREHOOK: query: select * from dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: select * from dest2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest2
+#### A masked pattern was here ####
+0	9
+2	1
+4	1
+5	9
+8	1
+9	1


[02/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/stats11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/stats11.q.out b/ql/src/test/results/clientpositive/llap/stats11.q.out
new file mode 100644
index 0000000..b4643b1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/stats11.q.out
@@ -0,0 +1,996 @@
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: explain
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+POSTHOOK: query: explain
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcbucket_mapjoin_part
+
+  Stage: Stage-1
+    Stats-Aggr Operator
+
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: query: desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08]        	 
+Database:           	default             	 
+Table:              	srcbucket_mapjoin_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	1                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	1358                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	4                   	 
+Bucket Columns:     	[key]               	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: query: desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08]        	 
+Database:           	default             	 
+Table:              	srcbucket_mapjoin_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	2                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	2750                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	4                   	 
+Bucket Columns:     	[key]               	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: query: desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08]        	 
+Database:           	default             	 
+Table:              	srcbucket_mapjoin_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	3                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	4200                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	4                   	 
+Bucket Columns:     	[key]               	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: query: desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08]        	 
+Database:           	default             	 
+Table:              	srcbucket_mapjoin_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	4                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	4                   	 
+Bucket Columns:     	[key]               	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+PREHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: srcbucket_mapjoin
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin
+                    numFiles 2
+                    numRows 0
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 2750
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin
+                      numFiles 2
+                      numRows 0
+                      rawDataSize 0
+                      serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 2750
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin
+                  name: default.srcbucket_mapjoin
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col6
+                Position of Big Table: 1
+                Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 0
+                          numRows 0
+                          rawDataSize 0
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 0
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 0
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: srcbucket_mapjoin
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin
+                    numFiles 2
+                    numRows 0
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 2750
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin
+                      numFiles 2
+                      numRows 0
+                      rawDataSize 0
+                      serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 2750
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin
+                  name: default.srcbucket_mapjoin
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col6
+                Position of Big Table: 1
+                Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 1
+                          numRows 464
+                          rawDataSize 8519
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 8983
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 1
+                numRows 464
+                rawDataSize 8519
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 8983
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/subquery_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_views.q.out b/ql/src/test/results/clientpositive/llap/subquery_views.q.out
new file mode 100644
index 0000000..35e80ae
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/subquery_views.q.out
@@ -0,0 +1,571 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- exists test
+create view cv1 as 
+select * 
+from src b 
+where exists
+  (select a.key 
+  from src a 
+  where b.value = a.value  and a.key = b.key and a.value > 'val_9')
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@cv1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- exists test
+create view cv1 as 
+select * 
+from src b 
+where exists
+  (select a.key 
+  from src a 
+  where b.value = a.value  and a.key = b.key and a.value > 'val_9')
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@cv1
+PREHOOK: query: describe extended cv1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cv1
+POSTHOOK: query: describe extended cv1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cv1
+key                 	string              	                    
+value               	string              	                    
+	 	 
+#### A masked pattern was here ####
+from src b 	 	 
+where exists	 	 
+  (select a.key 	 	 
+  from src a 	 	 
+  where b.value = a.value  and a.key = b.key and a.value > 'val_9'), viewExpandedText:select `b`.`key`, `b`.`value` 	 	 
+from `default`.`src` `b` 	 	 
+where exists	 	 
+  (select `a`.`key` 	 	 
+  from `default`.`src` `a` 	 	 
+  where `b`.`value` = `a`.`value`  and `a`.`key` = `b`.`key` and `a`.`value` > 'val_9'), tableType:VIRTUAL_VIEW)		 
+PREHOOK: query: select * 
+from cv1 where cv1.key in (select key from cv1 c where c.key > '95')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cv1
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * 
+from cv1 where cv1.key in (select key from cv1 c where c.key > '95')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cv1
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+96	val_96
+97	val_97
+97	val_97
+98	val_98
+98	val_98
+PREHOOK: query: -- not in test
+create view cv2 as 
+select * 
+from src b 
+where b.key not in
+  (select a.key 
+  from src a 
+  where b.value = a.value  and a.key = b.key and a.value > 'val_11'
+  )
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@cv2
+POSTHOOK: query: -- not in test
+create view cv2 as 
+select * 
+from src b 
+where b.key not in
+  (select a.key 
+  from src a 
+  where b.value = a.value  and a.key = b.key and a.value > 'val_11'
+  )
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@cv2
+PREHOOK: query: describe extended cv2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cv2
+POSTHOOK: query: describe extended cv2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cv2
+key                 	string              	                    
+value               	string              	                    
+	 	 
+#### A masked pattern was here ####
+from src b 	 	 
+where b.key not in	 	 
+  (select a.key 	 	 
+  from src a 	 	 
+  where b.value = a.value  and a.key = b.key and a.value > 'val_11'	 	 
+  ), viewExpandedText:select `b`.`key`, `b`.`value` 	 	 
+from `default`.`src` `b` 	 	 
+where `b`.`key` not in	 	 
+  (select `a`.`key` 	 	 
+  from `default`.`src` `a` 	 	 
+  where `b`.`value` = `a`.`value`  and `a`.`key` = `b`.`key` and `a`.`value` > 'val_11'	 	 
+  ), tableType:VIRTUAL_VIEW)		 
+Warning: Shuffle Join MERGEJOIN[67][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[69][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 9' is a cross product
+PREHOOK: query: explain
+select * 
+from cv2 where cv2.key in (select key from cv2 c where c.key < '11')
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * 
+from cv2 where cv2.key in (select key from cv2 c where c.key < '11')
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 10 <- Map 13 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE)
+        Reducer 12 <- Map 11 (SIMPLE_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
+        Reducer 3 <- Map 7 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 10 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
+        Reducer 6 <- Map 5 (SIMPLE_EDGE)
+        Reducer 9 <- Map 8 (SIMPLE_EDGE), Reducer 12 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  properties:
+                    insideView TRUE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key < '11') (type: boolean)
+                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 11 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((value > 'val_11') and (key is null or value is null)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 13 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((value > 'val_11') and (key < '11')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string), key (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 55 Data size: 14575 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col2 (type: string), _col1 (type: string), _col0 (type: string)
+                        sort order: +++
+                        Map-reduce partition columns: _col2 (type: string), _col1 (type: string), _col0 (type: string)
+                        Statistics: Num rows: 55 Data size: 14575 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((value > 'val_11') and (key is null or value is null)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 7 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((value > 'val_11') and (key < '11')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 9790 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string), key (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 55 Data size: 14575 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col2 (type: string), _col1 (type: string), _col0 (type: string)
+                        sort order: +++
+                        Map-reduce partition columns: _col2 (type: string), _col1 (type: string), _col0 (type: string)
+                        Statistics: Num rows: 55 Data size: 14575 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 8 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  properties:
+                    insideView TRUE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key < '11') (type: boolean)
+                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 10 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                keys:
+                  0 _col0 (type: string), _col1 (type: string), _col0 (type: string)
+                  1 _col2 (type: string), _col1 (type: string), _col0 (type: string)
+                outputColumnNames: _col0, _col3
+                Statistics: Num rows: 166 Data size: 28884 Basic stats: COMPLETE Column stats: COMPLETE
+                Filter Operator
+                  predicate: _col3 is null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: _col0 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
+        Reducer 12 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Filter Operator
+                  predicate: (_col0 = 0) (type: boolean)
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 
+                  1 
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string), _col0 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col0 (type: string)
+                  Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                keys:
+                  0 _col0 (type: string), _col1 (type: string), _col0 (type: string)
+                  1 _col2 (type: string), _col1 (type: string), _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col3
+                Statistics: Num rows: 166 Data size: 43990 Basic stats: COMPLETE Column stats: COMPLETE
+                Filter Operator
+                  predicate: _col3 is null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: string)
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 6 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Filter Operator
+                  predicate: (_col0 = 0) (type: boolean)
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+        Reducer 9 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 
+                  1 
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string), _col0 (type: string)
+                  sort order: +++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col0 (type: string)
+                  Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join MERGEJOIN[67][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[69][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 9' is a cross product
+PREHOOK: query: select * 
+from cv2 where cv2.key in (select key from cv2 c where c.key < '11')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cv2
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * 
+from cv2 where cv2.key in (select key from cv2 c where c.key < '11')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cv2
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+100	val_100
+100	val_100
+103	val_103
+103	val_103
+104	val_104
+104	val_104
+105	val_105
+PREHOOK: query: -- in where + having
+create view cv3 as
+select key, value, count(*) 
+from src b
+where b.key in (select key from src where src.key > '8')
+group by key, value
+having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key )
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@cv3
+POSTHOOK: query: -- in where + having
+create view cv3 as
+select key, value, count(*) 
+from src b
+where b.key in (select key from src where src.key > '8')
+group by key, value
+having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key )
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@cv3
+PREHOOK: query: describe extended cv3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cv3
+POSTHOOK: query: describe extended cv3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cv3
+key                 	string              	                    
+value               	string              	                    
+_c2                 	bigint              	                    
+	 	 
+#### A masked pattern was here ####
+from src b	 	 
+where b.key in (select key from src where src.key > '8')	 	 
+group by key, value	 	 
+having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key ), viewExpandedText:select `b`.`key`, `b`.`value`, count(*) 	 	 
+from `default`.`src` `b`	 	 
+where `b`.`key` in (select `src`.`key` from `default`.`src` where `src`.`key` > '8')	 	 
+group by `b`.`key`, `b`.`value`	 	 
+having count(*) in (select count(*) from `default`.`src` `s1` where `s1`.`key` > '9' group by `s1`.`key` ), tableType:VIRTUAL_VIEW)		 
+PREHOOK: query: select * from cv3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cv3
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from cv3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cv3
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+80	val_80	1
+82	val_82	1
+83	val_83	2
+84	val_84	2
+85	val_85	1
+86	val_86	1
+87	val_87	1
+9	val_9	1
+90	val_90	3
+92	val_92	1
+95	val_95	2
+96	val_96	1
+97	val_97	2
+98	val_98	2
+PREHOOK: query: -- join of subquery views
+select *
+from cv3
+where cv3.key in (select key from cv1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cv1
+PREHOOK: Input: default@cv3
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- join of subquery views
+select *
+from cv3
+where cv3.key in (select key from cv1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cv1
+POSTHOOK: Input: default@cv3
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+90	val_90	3
+92	val_92	1
+95	val_95	2
+96	val_96	1
+97	val_97	2
+98	val_98	2
+PREHOOK: query: drop table tc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table tc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table tc (`@d` int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tc
+POSTHOOK: query: create table tc (`@d` int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tc
+PREHOOK: query: insert overwrite table tc select 1 from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tc
+POSTHOOK: query: insert overwrite table tc select 1 from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tc
+POSTHOOK: Lineage: tc.@d SIMPLE []
+PREHOOK: query: drop view tcv
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: drop view tcv
+POSTHOOK: type: DROPVIEW
+PREHOOK: query: create view tcv as select * from tc b where exists (select a.`@d` from tc a where b.`@d`=a.`@d`)
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@tc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tcv
+POSTHOOK: query: create view tcv as select * from tc b where exists (select a.`@d` from tc a where b.`@d`=a.`@d`)
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@tc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tcv
+PREHOOK: query: describe extended tcv
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tcv
+POSTHOOK: query: describe extended tcv
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tcv
+@d                  	int                 	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: select * from tcv
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tc
+PREHOOK: Input: default@tcv
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tcv
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tc
+POSTHOOK: Input: default@tcv
+#### A masked pattern was here ####
+1


[06/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/smb_mapjoin_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_17.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_17.q.out
new file mode 100644
index 0000000..c69be29
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_17.q.out
@@ -0,0 +1,1069 @@
+PREHOOK: query: -- Create bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: -- Create bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table3
+POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table3
+PREHOOK: query: CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table4
+POSTHOOK: query: CREATE TABLE test_table4 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table4
+PREHOOK: query: CREATE TABLE test_table5 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table5
+POSTHOOK: query: CREATE TABLE test_table5 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table5
+PREHOOK: query: CREATE TABLE test_table6 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table6
+POSTHOOK: query: CREATE TABLE test_table6 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table6
+PREHOOK: query: CREATE TABLE test_table7 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table7
+POSTHOOK: query: CREATE TABLE test_table7 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table7
+PREHOOK: query: CREATE TABLE test_table8 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table8
+POSTHOOK: query: CREATE TABLE test_table8 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table8
+PREHOOK: query: INSERT OVERWRITE TABLE test_table1
+SELECT * FROM src WHERE key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table1
+SELECT * FROM src WHERE key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1
+POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE test_table2
+SELECT * FROM src  WHERE key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2
+SELECT * FROM src  WHERE key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table2
+POSTHOOK: Lineage: test_table2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3
+SELECT * FROM src WHERE key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table3
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3
+SELECT * FROM src WHERE key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table3
+POSTHOOK: Lineage: test_table3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE test_table4
+SELECT * FROM src  WHERE key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table4
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table4
+SELECT * FROM src  WHERE key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table4
+POSTHOOK: Lineage: test_table4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE test_table5
+SELECT * FROM src WHERE key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table5
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table5
+SELECT * FROM src WHERE key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table5
+POSTHOOK: Lineage: test_table5.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE test_table6
+SELECT * FROM src  WHERE key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table6
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table6
+SELECT * FROM src  WHERE key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table6
+POSTHOOK: Lineage: test_table6.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table6.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE test_table7
+SELECT * FROM src WHERE key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table7
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table7
+SELECT * FROM src WHERE key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table7
+POSTHOOK: Lineage: test_table7.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table7.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE test_table8
+SELECT * FROM src  WHERE key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table8
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table8
+SELECT * FROM src  WHERE key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table8
+POSTHOOK: Lineage: test_table8.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table8.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Mapjoin followed by a aggregation should be performed in a single MR job upto 7 tables
+EXPLAIN
+SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*)
+FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+JOIN test_table3 c ON a.key = c.key
+JOIN test_table4 d ON a.key = d.key
+JOIN test_table5 e ON a.key = e.key
+JOIN test_table6 f ON a.key = f.key
+JOIN test_table7 g ON a.key = g.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Mapjoin followed by a aggregation should be performed in a single MR job upto 7 tables
+EXPLAIN
+SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*)
+FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+JOIN test_table3 c ON a.key = c.key
+JOIN test_table4 d ON a.key = d.key
+JOIN test_table5 e ON a.key = e.key
+JOIN test_table6 f ON a.key = f.key
+JOIN test_table7 g ON a.key = g.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE), Map 8 (SIMPLE_EDGE), Map 9 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 7 
+            Map Operator Tree:
+                TableScan
+                  alias: e
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 8 
+            Map Operator Tree:
+                TableScan
+                  alias: f
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 9 
+            Map Operator Tree:
+                TableScan
+                  alias: g
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Inner Join 0 to 2
+                     Inner Join 0 to 3
+                     Inner Join 0 to 4
+                     Inner Join 0 to 5
+                     Inner Join 0 to 6
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                  3 key (type: int)
+                  4 key (type: int)
+                  5 key (type: int)
+                  6 key (type: int)
+                Statistics: Num rows: 66 Data size: 462 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*)
+FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+JOIN test_table3 c ON a.key = c.key
+JOIN test_table4 d ON a.key = d.key
+JOIN test_table5 e ON a.key = e.key
+JOIN test_table6 f ON a.key = f.key
+JOIN test_table7 g ON a.key = g.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table4
+PREHOOK: Input: default@test_table5
+PREHOOK: Input: default@test_table6
+PREHOOK: Input: default@test_table7
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*)
+FROM test_table1 a JOIN test_table2 b ON a.key = b.key
+JOIN test_table3 c ON a.key = c.key
+JOIN test_table4 d ON a.key = d.key
+JOIN test_table5 e ON a.key = e.key
+JOIN test_table6 f ON a.key = f.key
+JOIN test_table7 g ON a.key = g.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table4
+POSTHOOK: Input: default@test_table5
+POSTHOOK: Input: default@test_table6
+POSTHOOK: Input: default@test_table7
+#### A masked pattern was here ####
+4378
+PREHOOK: query: -- It should be automatically converted to a sort-merge join followed by a groupby in
+-- a single MR job
+EXPLAIN
+SELECT count(*)
+FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
+LEFT OUTER JOIN test_table3 c ON a.key = c.key
+LEFT OUTER JOIN test_table4 d ON a.key = d.key
+LEFT OUTER JOIN test_table5 e ON a.key = e.key
+LEFT OUTER JOIN test_table6 f ON a.key = f.key
+LEFT OUTER JOIN test_table7 g ON a.key = g.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- It should be automatically converted to a sort-merge join followed by a groupby in
+-- a single MR job
+EXPLAIN
+SELECT count(*)
+FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
+LEFT OUTER JOIN test_table3 c ON a.key = c.key
+LEFT OUTER JOIN test_table4 d ON a.key = d.key
+LEFT OUTER JOIN test_table5 e ON a.key = e.key
+LEFT OUTER JOIN test_table6 f ON a.key = f.key
+LEFT OUTER JOIN test_table7 g ON a.key = g.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: e
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: f
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: g
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Merge Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                           Left Outer Join0 to 2
+                           Left Outer Join0 to 3
+                           Left Outer Join0 to 4
+                           Left Outer Join0 to 5
+                           Left Outer Join0 to 6
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                        2 _col0 (type: int)
+                        3 _col0 (type: int)
+                        4 _col0 (type: int)
+                        5 _col0 (type: int)
+                        6 _col0 (type: int)
+                      Statistics: Num rows: 66 Data size: 462 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT count(*)
+FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
+LEFT OUTER JOIN test_table3 c ON a.key = c.key
+LEFT OUTER JOIN test_table4 d ON a.key = d.key
+LEFT OUTER JOIN test_table5 e ON a.key = e.key
+LEFT OUTER JOIN test_table6 f ON a.key = f.key
+LEFT OUTER JOIN test_table7 g ON a.key = g.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table4
+PREHOOK: Input: default@test_table5
+PREHOOK: Input: default@test_table6
+PREHOOK: Input: default@test_table7
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT count(*)
+FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
+LEFT OUTER JOIN test_table3 c ON a.key = c.key
+LEFT OUTER JOIN test_table4 d ON a.key = d.key
+LEFT OUTER JOIN test_table5 e ON a.key = e.key
+LEFT OUTER JOIN test_table6 f ON a.key = f.key
+LEFT OUTER JOIN test_table7 g ON a.key = g.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table4
+POSTHOOK: Input: default@test_table5
+POSTHOOK: Input: default@test_table6
+POSTHOOK: Input: default@test_table7
+#### A masked pattern was here ####
+4378
+PREHOOK: query: EXPLAIN
+SELECT count(*)
+FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
+LEFT OUTER JOIN test_table3 c ON a.key = c.key
+LEFT OUTER JOIN test_table4 d ON a.key = d.key
+LEFT OUTER JOIN test_table5 e ON a.key = e.key
+LEFT OUTER JOIN test_table6 f ON a.key = f.key
+LEFT OUTER JOIN test_table7 g ON a.key = g.key
+LEFT OUTER JOIN test_table8 h ON a.key = h.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT count(*)
+FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
+LEFT OUTER JOIN test_table3 c ON a.key = c.key
+LEFT OUTER JOIN test_table4 d ON a.key = d.key
+LEFT OUTER JOIN test_table5 e ON a.key = e.key
+LEFT OUTER JOIN test_table6 f ON a.key = f.key
+LEFT OUTER JOIN test_table7 g ON a.key = g.key
+LEFT OUTER JOIN test_table8 h ON a.key = h.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: e
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: f
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: g
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: h
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Merge Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                           Left Outer Join0 to 2
+                           Left Outer Join0 to 3
+                           Left Outer Join0 to 4
+                           Left Outer Join0 to 5
+                           Left Outer Join0 to 6
+                           Left Outer Join0 to 7
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                        2 _col0 (type: int)
+                        3 _col0 (type: int)
+                        4 _col0 (type: int)
+                        5 _col0 (type: int)
+                        6 _col0 (type: int)
+                        7 _col0 (type: int)
+                      Statistics: Num rows: 77 Data size: 539 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: llap
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT count(*)
+FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
+LEFT OUTER JOIN test_table3 c ON a.key = c.key
+LEFT OUTER JOIN test_table4 d ON a.key = d.key
+LEFT OUTER JOIN test_table5 e ON a.key = e.key
+LEFT OUTER JOIN test_table6 f ON a.key = f.key
+LEFT OUTER JOIN test_table7 g ON a.key = g.key
+LEFT OUTER JOIN test_table8 h ON a.key = h.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table4
+PREHOOK: Input: default@test_table5
+PREHOOK: Input: default@test_table6
+PREHOOK: Input: default@test_table7
+PREHOOK: Input: default@test_table8
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT count(*)
+FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key
+LEFT OUTER JOIN test_table3 c ON a.key = c.key
+LEFT OUTER JOIN test_table4 d ON a.key = d.key
+LEFT OUTER JOIN test_table5 e ON a.key = e.key
+LEFT OUTER JOIN test_table6 f ON a.key = f.key
+LEFT OUTER JOIN test_table7 g ON a.key = g.key
+LEFT OUTER JOIN test_table8 h ON a.key = h.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table4
+POSTHOOK: Input: default@test_table5
+POSTHOOK: Input: default@test_table6
+POSTHOOK: Input: default@test_table7
+POSTHOOK: Input: default@test_table8
+#### A masked pattern was here ####
+13126
+PREHOOK: query: -- outer join with max 16 aliases
+EXPLAIN
+SELECT a.*
+FROM test_table1 a
+LEFT OUTER JOIN test_table2 b ON a.key = b.key
+LEFT OUTER JOIN test_table3 c ON a.key = c.key
+LEFT OUTER JOIN test_table4 d ON a.key = d.key
+LEFT OUTER JOIN test_table5 e ON a.key = e.key
+LEFT OUTER JOIN test_table6 f ON a.key = f.key
+LEFT OUTER JOIN test_table7 g ON a.key = g.key
+LEFT OUTER JOIN test_table8 h ON a.key = h.key
+LEFT OUTER JOIN test_table4 i ON a.key = i.key
+LEFT OUTER JOIN test_table5 j ON a.key = j.key
+LEFT OUTER JOIN test_table6 k ON a.key = k.key
+LEFT OUTER JOIN test_table7 l ON a.key = l.key
+LEFT OUTER JOIN test_table8 m ON a.key = m.key
+LEFT OUTER JOIN test_table7 n ON a.key = n.key
+LEFT OUTER JOIN test_table8 o ON a.key = o.key
+LEFT OUTER JOIN test_table4 p ON a.key = p.key
+LEFT OUTER JOIN test_table5 q ON a.key = q.key
+LEFT OUTER JOIN test_table6 r ON a.key = r.key
+LEFT OUTER JOIN test_table7 s ON a.key = s.key
+LEFT OUTER JOIN test_table8 t ON a.key = t.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- outer join with max 16 aliases
+EXPLAIN
+SELECT a.*
+FROM test_table1 a
+LEFT OUTER JOIN test_table2 b ON a.key = b.key
+LEFT OUTER JOIN test_table3 c ON a.key = c.key
+LEFT OUTER JOIN test_table4 d ON a.key = d.key
+LEFT OUTER JOIN test_table5 e ON a.key = e.key
+LEFT OUTER JOIN test_table6 f ON a.key = f.key
+LEFT OUTER JOIN test_table7 g ON a.key = g.key
+LEFT OUTER JOIN test_table8 h ON a.key = h.key
+LEFT OUTER JOIN test_table4 i ON a.key = i.key
+LEFT OUTER JOIN test_table5 j ON a.key = j.key
+LEFT OUTER JOIN test_table6 k ON a.key = k.key
+LEFT OUTER JOIN test_table7 l ON a.key = l.key
+LEFT OUTER JOIN test_table8 m ON a.key = m.key
+LEFT OUTER JOIN test_table7 n ON a.key = n.key
+LEFT OUTER JOIN test_table8 o ON a.key = o.key
+LEFT OUTER JOIN test_table4 p ON a.key = p.key
+LEFT OUTER JOIN test_table5 q ON a.key = q.key
+LEFT OUTER JOIN test_table6 r ON a.key = r.key
+LEFT OUTER JOIN test_table7 s ON a.key = s.key
+LEFT OUTER JOIN test_table8 t ON a.key = t.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 18 (SIMPLE_EDGE), Map 19 (SIMPLE_EDGE), Map 20 (SIMPLE_EDGE), Map 21 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: d
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: e
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: f
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: g
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: h
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: i
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: j
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: k
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: l
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: m
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: n
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: o
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: p
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Merge Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                           Left Outer Join0 to 2
+                           Left Outer Join0 to 3
+                           Left Outer Join0 to 4
+                           Left Outer Join0 to 5
+                           Left Outer Join0 to 6
+                           Left Outer Join0 to 7
+                           Left Outer Join0 to 8
+                           Left Outer Join0 to 9
+                           Left Outer Join0 to 10
+                           Left Outer Join0 to 11
+                           Left Outer Join0 to 12
+                           Left Outer Join0 to 13
+                           Left Outer Join0 to 14
+                           Left Outer Join0 to 15
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                        10 _col0 (type: int)
+                        11 _col0 (type: int)
+                        12 _col0 (type: int)
+                        13 _col0 (type: int)
+                        14 _col0 (type: int)
+                        15 _col0 (type: int)
+                        2 _col0 (type: int)
+                        3 _col0 (type: int)
+                        4 _col0 (type: int)
+                        5 _col0 (type: int)
+                        6 _col0 (type: int)
+                        7 _col0 (type: int)
+                        8 _col0 (type: int)
+                        9 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 165 Data size: 1155 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 165 Data size: 1155 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+        Map 18 
+            Map Operator Tree:
+                TableScan
+                  alias: q
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 19 
+            Map Operator Tree:
+                TableScan
+                  alias: r
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 20 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 21 
+            Map Operator Tree:
+                TableScan
+                  alias: t
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                     Left Outer Join0 to 2
+                     Left Outer Join0 to 3
+                     Left Outer Join0 to 4
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                  2 _col0 (type: int)
+                  3 _col0 (type: int)
+                  4 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 726 Data size: 5082 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 726 Data size: 5082 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/smb_mapjoin_18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_18.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_18.q.out
new file mode 100644
index 0000000..c29a36b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_18.q.out
@@ -0,0 +1,480 @@
+PREHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1@ds=1
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.test_table2
+            Execution mode: llap
+            LLAP IO: no inputs
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table2
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Output: default@test_table2@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Output: default@test_table2@ds=1
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(*) from test_table1 where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from test_table1 where ds = '1' and hash(key) % 2 = 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 where ds = '1' and hash(key) % 2 = 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+247
+PREHOOK: query: select count(*) from test_table1 where ds = '1' and hash(key) % 2 = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 where ds = '1' and hash(key) % 2 = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+253
+PREHOOK: query: select count(*) from test_table1 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+247
+PREHOOK: query: select count(*) from test_table1 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+253
+PREHOOK: query: select count(*) from test_table2 where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from test_table2 where ds = '1' and hash(key) % 2 = 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '1' and hash(key) % 2 = 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+247
+PREHOOK: query: select count(*) from test_table2 where ds = '1' and hash(key) % 2 = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '1' and hash(key) % 2 = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+253
+PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+247
+PREHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+253
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation, one of the buckets should be empty
+
+EXPLAIN
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' and a.key = 238
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation, one of the buckets should be empty
+
+EXPLAIN
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' and a.key = 238
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key = 238) (type: boolean)
+                    Statistics: Num rows: 250 Data size: 4656 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: 238 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 250 Data size: 4656 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 250 Data size: 4656 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 4656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 4656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table2
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table2
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' and a.key = 238
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Output: default@test_table2@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' and a.key = 238
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Output: default@test_table2@ds=2
+POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE []
+POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(*) from test_table2 where ds = '2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+2
+PREHOOK: query: select count(*) from test_table2 where ds = '2' and hash(key) % 2 = 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '2' and hash(key) % 2 = 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=2
+#### A masked pattern was here ####
+2
+PREHOOK: query: select count(*) from test_table2 where ds = '2' and hash(key) % 2 = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '2' and hash(key) % 2 = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=2
+#### A masked pattern was here ####
+0
+PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=2
+#### A masked pattern was here ####
+2
+PREHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=2
+#### A masked pattern was here ####
+0
+PREHOOK: query: EXPLAIN
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3')
+SELECT a.key, a.value FROM test_table2 a WHERE a.ds = '2'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '3')
+SELECT a.key, a.value FROM test_table2 a WHERE a.ds = '2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 2 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 2 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.test_table2
+            Execution mode: llap
+            LLAP IO: no inputs
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 3
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table2
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2')
+SELECT a.key, a.value FROM test_table2 a WHERE a.ds = '2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=2
+PREHOOK: Output: default@test_table2@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2')
+SELECT a.key, a.value FROM test_table2 a WHERE a.ds = '2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=2
+POSTHOOK: Output: default@test_table2@ds=2
+POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE [(test_table2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(test_table2)a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(*) from test_table2 where ds = '3'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '3'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+0
+PREHOOK: query: select count(*) from test_table2 where ds = '3' and hash(key) % 2 = 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '3' and hash(key) % 2 = 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+0
+PREHOOK: query: select count(*) from test_table2 where ds = '3' and hash(key) % 2 = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '3' and hash(key) % 2 = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+0
+PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '3'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '3'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+0
+PREHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '3'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 2 out of 2) s where ds = '3'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+0

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/smb_mapjoin_19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_19.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_19.q.out
new file mode 100644
index 0000000..1b53fdb
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_19.q.out
@@ -0,0 +1,259 @@
+PREHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1@ds=1
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.test_table2
+            Execution mode: llap
+            LLAP IO: no inputs
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table2
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Output: default@test_table2@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1')
+SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Output: default@test_table2@ds=1
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(*) from test_table1 where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from test_table1 where ds = '1' and hash(key) % 16 = 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 where ds = '1' and hash(key) % 16 = 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+36
+PREHOOK: query: select count(*) from test_table1 where ds = '1' and hash(key) % 16 = 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 where ds = '1' and hash(key) % 16 = 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+40
+PREHOOK: query: select count(*) from test_table1 where ds = '1' and hash(key) % 16 = 12
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 where ds = '1' and hash(key) % 16 = 12
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+29
+PREHOOK: query: select count(*) from test_table1 tablesample (bucket 1 out of 16) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 tablesample (bucket 1 out of 16) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+36
+PREHOOK: query: select count(*) from test_table1 tablesample (bucket 6 out of 16) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 tablesample (bucket 6 out of 16) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+40
+PREHOOK: query: select count(*) from test_table1 tablesample (bucket 13 out of 16) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table1 tablesample (bucket 13 out of 16) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+#### A masked pattern was here ####
+29
+PREHOOK: query: select count(*) from test_table2 where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from test_table2 where ds = '1' and hash(key) % 16 = 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '1' and hash(key) % 16 = 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+36
+PREHOOK: query: select count(*) from test_table2 where ds = '1' and hash(key) % 16 = 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '1' and hash(key) % 16 = 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+40
+PREHOOK: query: select count(*) from test_table2 where ds = '1' and hash(key) % 16 = 12
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 where ds = '1' and hash(key) % 16 = 12
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+29
+PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 16) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 16) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+36
+PREHOOK: query: select count(*) from test_table2 tablesample (bucket 6 out of 16) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 6 out of 16) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+40
+PREHOOK: query: select count(*) from test_table2 tablesample (bucket 13 out of 16) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from test_table2 tablesample (bucket 13 out of 16) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+#### A masked pattern was here ####
+29


[29/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out
new file mode 100644
index 0000000..4b4a95e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out
@@ -0,0 +1,1239 @@
+PREHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: CREATE TABLE test_table2 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: CREATE TABLE test_table2 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table3
+POSTHOOK: query: CREATE TABLE test_table3 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table3
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT key, key+1, value where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT key, key+1, value where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1@ds=1
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT key, key+1, value where key < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table2@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT key, key+1, value where key < 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table2@ds=1
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation, since the sort-order matches
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.key2, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation, since the sort-order matches
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.key2, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int), _col1 (type: int)
+                          1 _col0 (type: int), _col1 (type: int)
+                        outputColumnNames: _col0, _col1, _col2, _col6
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col6) (type: string)
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int), _col1 (type: int)
+                            sort order: +-
+                            Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                            Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.key2, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, a.key2, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key2, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+2	3	val_2val_2	1
+4	5	val_4val_4	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+8	9	val_8val_8	1
+9	10	val_9val_9	1
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation, since the sort-order matches
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq1.key, subq1.key2, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation, since the sort-order matches
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq1.key, subq1.key2, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int), _col1 (type: int)
+                          1 _col0 (type: int), _col1 (type: int)
+                        outputColumnNames: _col0, _col1, _col2, _col6
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col6) (type: string)
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int), _col1 (type: int)
+                            sort order: +-
+                            Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                            Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq1.key, subq1.key2, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq1.key, subq1.key2, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key2, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+2	3	val_2val_2	1
+4	5	val_4val_4	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+8	9	val_8val_8	1
+9	10	val_9val_9	1
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-reduce operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key2, a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-reduce operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key2, a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int), _col1 (type: int)
+                          1 _col0 (type: int), _col1 (type: int)
+                        outputColumnNames: _col0, _col1, _col2, _col6
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col6) (type: string)
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int), _col1 (type: int)
+                            sort order: +-
+                            Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                            Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-reduce operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq1.key2, subq1.key, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-reduce operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq1.key2, subq1.key, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int), _col1 (type: int)
+                          1 _col0 (type: int), _col1 (type: int)
+                        outputColumnNames: _col0, _col1, _col2, _col6
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col6) (type: string)
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int), _col1 (type: int)
+                            sort order: +-
+                            Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                            Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq2.key, subq2.key2, subq2.value from
+(
+SELECT subq1.key2, subq1.key, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+)subq2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq2.key, subq2.key2, subq2.value from
+(
+SELECT subq1.key2, subq1.key, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+)subq2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int), _col1 (type: int)
+                          1 _col0 (type: int), _col1 (type: int)
+                        outputColumnNames: _col0, _col1, _col2, _col6
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col6) (type: string)
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int), _col1 (type: int)
+                            sort order: +-
+                            Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                            Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq2.key, subq2.key2, subq2.value from
+(
+SELECT subq1.key2, subq1.key, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+)subq2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq2.key, subq2.key2, subq2.value from
+(
+SELECT subq1.key2, subq1.key, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+)subq2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key2, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+2	3	val_2val_2	1
+4	5	val_4val_4	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+8	9	val_8val_8	1
+9	10	val_9val_9	1
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq2.k2, subq2.k1, subq2.value from
+(
+SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+)subq2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq2.k2, subq2.k1, subq2.value from
+(
+SELECT subq1.key2 as k1, subq1.key as k2, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+)subq2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int), _col1 (type: int)
+                          1 _col0 (type: int), _col1 (type: int)
+                        outputColumnNames: _col0, _col1, _col2, _col6
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col6) (type: string)
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int), _col1 (type: int)
+                            sort order: +-
+                            Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                            Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq2.k2, subq2.k1, subq2.value from
+(
+SELECT subq1.key2 as k1, subq1.key  as k2, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+)subq2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT subq2.k2, subq2.k1, subq2.value from
+(
+SELECT subq1.key2 as k1, subq1.key  as k2, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+)subq2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key2 SIMPLE [(test_table1)a.FieldSchema(name:key2, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+0	1	val_0val_0	1
+2	3	val_2val_2	1
+4	5	val_4val_4	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+5	6	val_5val_5	1
+8	9	val_8val_8	1
+9	10	val_9val_9	1
+PREHOOK: query: CREATE TABLE test_table4 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key, key2) SORTED BY (key DESC, key2 DESC) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table4
+POSTHOOK: query: CREATE TABLE test_table4 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key, key2) SORTED BY (key DESC, key2 DESC) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table4
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-reduce operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table4 PARTITION (ds = '1')
+SELECT subq2.k2, subq2.k1, subq2.value from
+(
+SELECT subq1.key2 as k1, subq1.key  as k2, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+)subq2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-reduce operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table4 PARTITION (ds = '1')
+SELECT subq2.k2, subq2.k1, subq2.value from
+(
+SELECT subq1.key2 as k1, subq1.key  as k2, subq1.value from
+(
+SELECT a.key, a.key2, concat(a.value, b.value) as value
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1'
+)subq1
+)subq2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                        Statistics: Num rows: 10 Data size: 171 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and key2 is not null) (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), key2 (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 84 Data size: 1651 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int), _col1 (type: int)
+                          1 _col0 (type: int), _col1 (type: int)
+                        outputColumnNames: _col0, _col1, _col2, _col6
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col6) (type: string)
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int), _col1 (type: int)
+                            sort order: --
+                            Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                            Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col2 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1816 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table4
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table4
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out
new file mode 100644
index 0000000..11de932
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out
@@ -0,0 +1,621 @@
+PREHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table3
+POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table3
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1@ds=1
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table2@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table2@ds=1
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+and (a.key = 0 or a.key = 5)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+and (a.key = 0 or a.key = 5)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((key = 0) or (key = 5)) and key is not null) (type: boolean)
+                    Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((key = 0) or (key = 5)) and key is not null) (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col4
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), concat(_col1, _col4) (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+and (a.key = 0 or a.key = 5)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+and (a.key = 0 or a.key = 5)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+PREHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1' and (key = 0 or key = 5)) a 
+JOIN 
+(select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1' and (key = 0 or key = 5)) a 
+JOIN 
+(select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table1
+                  Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((key = 0) or (key = 5)) and key is not null) (type: boolean)
+                    Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table2
+                  Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((key = 0) or (key = 5)) and key is not null) (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col3
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), concat(_col1, _col3) (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1' and (key = 0 or key = 5)) a 
+JOIN 
+(select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1' and (key = 0 or key = 5)) a 
+JOIN 
+(select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+PREHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1' and key < 8) a 
+JOIN 
+(select key, value from test_table2 where ds = '1' and key < 8) b 
+ON a.key = b.key
+WHERE a.key = 0 or a.key = 5
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1' and key < 8) a 
+JOIN 
+(select key, value from test_table2 where ds = '1' and key < 8) b 
+ON a.key = b.key
+WHERE a.key = 0 or a.key = 5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table1
+                  Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and ((key = 0) or (key = 5))) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 2 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table2
+                  Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and ((key = 0) or (key = 5))) (type: boolean)
+                    Statistics: Num rows: 28 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 28 Data size: 469 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col3
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 30 Data size: 515 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), concat(_col1, _col3) (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 30 Data size: 515 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 30 Data size: 515 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 30 Data size: 515 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 30 Data size: 515 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1' and key < 8) a 
+JOIN 
+(select key, value from test_table2 where ds = '1' and key < 8) b 
+ON a.key = b.key
+WHERE a.key = 0 or a.key = 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1' and key < 8) a 
+JOIN 
+(select key, value from test_table2 where ds = '1' and key < 8) b 
+ON a.key = b.key
+WHERE a.key = 0 or a.key = 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+0	val_0val_0	1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1
+5	val_5val_5	1


[34/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/authorization_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/authorization_2.q.out b/ql/src/test/results/clientpositive/llap/authorization_2.q.out
new file mode 100644
index 0000000..449cd02
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/authorization_2.q.out
@@ -0,0 +1,667 @@
+PREHOOK: query: -- SORT_BEFORE_DIFF
+
+create table authorization_part (key int, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: -- SORT_BEFORE_DIFF
+
+create table authorization_part (key int, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: create table src_auth_tmp as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_auth_tmp
+POSTHOOK: query: create table src_auth_tmp as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_auth_tmp
+POSTHOOK: Lineage: src_auth_tmp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_auth_tmp.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE")
+PREHOOK: type: ALTERTABLE_PROPERTIES
+PREHOOK: Input: default@authorization_part
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE")
+POSTHOOK: type: ALTERTABLE_PROPERTIES
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: -- column grant to user
+grant Create on  authorization_part to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: -- column grant to user
+grant Create on  authorization_part to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: grant Update on table authorization_part to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: grant Update on table authorization_part to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: grant Drop on table authorization_part to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: grant Drop on table authorization_part to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: grant select on table src_auth_tmp to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@src_auth_tmp
+POSTHOOK: query: grant select on table src_auth_tmp to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@src_auth_tmp
+PREHOOK: query: show grant user hive_test_user on table authorization_part
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part			hive_test_user	USER	CREATE	false	-1	hive_test_user
+default	authorization_part			hive_test_user	USER	DROP	false	-1	hive_test_user
+default	authorization_part			hive_test_user	USER	UPDATE	false	-1	hive_test_user
+PREHOOK: query: alter table authorization_part add partition (ds='2010')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: alter table authorization_part add partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]		hive_test_user	USER	CREATE	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	DROP	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	UPDATE	false	-1	hive_test_user
+PREHOOK: query: grant select(key) on table authorization_part to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: grant select(key) on table authorization_part to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: alter table authorization_part drop partition (ds='2010')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@authorization_part
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: alter table authorization_part drop partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_auth_tmp
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_auth_tmp
+POSTHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]	[key]	hive_test_user	USER	SELECT	false	-1	hive_test_user
+PREHOOK: query: show grant user hive_test_user on table authorization_part(key)
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part(key)
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part		[key]	hive_test_user	USER	SELECT	false	-1	hive_test_user
+PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@authorization_part
+PREHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+0
+0
+0
+2
+4
+5
+5
+5
+8
+9
+10
+11
+12
+12
+15
+15
+17
+18
+18
+19
+PREHOOK: query: revoke select(key) on table authorization_part from user hive_test_user
+PREHOOK: type: REVOKE_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: revoke select(key) on table authorization_part from user hive_test_user
+POSTHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: show grant user hive_test_user on table authorization_part(key)
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part(key)
+POSTHOOK: type: SHOW_GRANT
+PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]	[key]	hive_test_user	USER	SELECT	false	-1	hive_test_user
+PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@authorization_part
+PREHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+0
+0
+0
+2
+4
+5
+5
+5
+8
+9
+10
+11
+12
+12
+15
+15
+17
+18
+18
+19
+PREHOOK: query: revoke select(key) on table authorization_part partition (ds='2010') from user hive_test_user
+PREHOOK: type: REVOKE_PRIVILEGE
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: revoke select(key) on table authorization_part partition (ds='2010') from user hive_test_user
+POSTHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+PREHOOK: query: alter table authorization_part drop partition (ds='2010')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@authorization_part
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: alter table authorization_part drop partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: -- table grant to user
+show grant user hive_test_user on table authorization_part
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: -- table grant to user
+show grant user hive_test_user on table authorization_part
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part			hive_test_user	USER	CREATE	false	-1	hive_test_user
+default	authorization_part			hive_test_user	USER	DROP	false	-1	hive_test_user
+default	authorization_part			hive_test_user	USER	UPDATE	false	-1	hive_test_user
+PREHOOK: query: alter table authorization_part add partition (ds='2010')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: alter table authorization_part add partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]		hive_test_user	USER	CREATE	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	DROP	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	UPDATE	false	-1	hive_test_user
+PREHOOK: query: grant select on table authorization_part to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: grant select on table authorization_part to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: alter table authorization_part drop partition (ds='2010')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@authorization_part
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: alter table authorization_part drop partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_auth_tmp
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_auth_tmp
+POSTHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]		hive_test_user	USER	CREATE	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	DROP	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	SELECT	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	UPDATE	false	-1	hive_test_user
+PREHOOK: query: show grant user hive_test_user on table authorization_part
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part			hive_test_user	USER	CREATE	false	-1	hive_test_user
+default	authorization_part			hive_test_user	USER	DROP	false	-1	hive_test_user
+default	authorization_part			hive_test_user	USER	SELECT	false	-1	hive_test_user
+default	authorization_part			hive_test_user	USER	UPDATE	false	-1	hive_test_user
+PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@authorization_part
+PREHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+0
+0
+0
+2
+4
+5
+5
+5
+8
+9
+10
+11
+12
+12
+15
+15
+17
+18
+18
+19
+PREHOOK: query: revoke select on table authorization_part from user hive_test_user
+PREHOOK: type: REVOKE_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: revoke select on table authorization_part from user hive_test_user
+POSTHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: show grant user hive_test_user on table authorization_part
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part			hive_test_user	USER	CREATE	false	-1	hive_test_user
+default	authorization_part			hive_test_user	USER	DROP	false	-1	hive_test_user
+default	authorization_part			hive_test_user	USER	UPDATE	false	-1	hive_test_user
+PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]		hive_test_user	USER	CREATE	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	DROP	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	SELECT	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	UPDATE	false	-1	hive_test_user
+PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@authorization_part
+PREHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+0
+0
+0
+2
+4
+5
+5
+5
+8
+9
+10
+11
+12
+12
+15
+15
+17
+18
+18
+19
+PREHOOK: query: revoke select on table authorization_part partition (ds='2010') from user hive_test_user
+PREHOOK: type: REVOKE_PRIVILEGE
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: revoke select on table authorization_part partition (ds='2010') from user hive_test_user
+POSTHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]		hive_test_user	USER	CREATE	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	DROP	false	-1	hive_test_user
+default	authorization_part	[2010]		hive_test_user	USER	UPDATE	false	-1	hive_test_user
+PREHOOK: query: alter table authorization_part drop partition (ds='2010')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@authorization_part
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: alter table authorization_part drop partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: -- column grant to group
+
+show grant group hive_test_group1 on table authorization_part
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: -- column grant to group
+
+show grant group hive_test_group1 on table authorization_part
+POSTHOOK: type: SHOW_GRANT
+PREHOOK: query: alter table authorization_part add partition (ds='2010')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: alter table authorization_part add partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+PREHOOK: query: grant select(key) on table authorization_part to group hive_test_group1
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: grant select(key) on table authorization_part to group hive_test_group1
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: alter table authorization_part drop partition (ds='2010')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@authorization_part
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: alter table authorization_part drop partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_auth_tmp
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_auth_tmp
+POSTHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]	[key]	hive_test_group1	GROUP	SELECT	false	-1	hive_test_user
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key)
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key)
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part		[key]	hive_test_group1	GROUP	SELECT	false	-1	hive_test_user
+PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@authorization_part
+PREHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+0
+0
+0
+2
+4
+5
+5
+5
+8
+9
+10
+11
+12
+12
+15
+15
+17
+18
+18
+19
+PREHOOK: query: revoke select(key) on table authorization_part from group hive_test_group1
+PREHOOK: type: REVOKE_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: revoke select(key) on table authorization_part from group hive_test_group1
+POSTHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key)
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key)
+POSTHOOK: type: SHOW_GRANT
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]	[key]	hive_test_group1	GROUP	SELECT	false	-1	hive_test_user
+PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@authorization_part
+PREHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+0
+0
+0
+2
+4
+5
+5
+5
+8
+9
+10
+11
+12
+12
+15
+15
+17
+18
+18
+19
+PREHOOK: query: revoke select(key) on table authorization_part partition (ds='2010') from group hive_test_group1
+PREHOOK: type: REVOKE_PRIVILEGE
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: revoke select(key) on table authorization_part partition (ds='2010') from group hive_test_group1
+POSTHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+PREHOOK: query: alter table authorization_part drop partition (ds='2010')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@authorization_part
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: alter table authorization_part drop partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: -- table grant to group
+show grant group hive_test_group1 on table authorization_part
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: -- table grant to group
+show grant group hive_test_group1 on table authorization_part
+POSTHOOK: type: SHOW_GRANT
+PREHOOK: query: alter table authorization_part add partition (ds='2010')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: alter table authorization_part add partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+PREHOOK: query: grant select on table authorization_part to group hive_test_group1
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: grant select on table authorization_part to group hive_test_group1
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: alter table authorization_part drop partition (ds='2010')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@authorization_part
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: alter table authorization_part drop partition (ds='2010')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_auth_tmp
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src_auth_tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_auth_tmp
+POSTHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src_auth_tmp)src_auth_tmp.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src_auth_tmp)src_auth_tmp.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]		hive_test_group1	GROUP	SELECT	false	-1	hive_test_user
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part			hive_test_group1	GROUP	SELECT	false	-1	hive_test_user
+PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@authorization_part
+PREHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+0
+0
+0
+2
+4
+5
+5
+5
+8
+9
+10
+11
+12
+12
+15
+15
+17
+18
+18
+19
+PREHOOK: query: revoke select on table authorization_part from group hive_test_group1
+PREHOOK: type: REVOKE_PRIVILEGE
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: revoke select on table authorization_part from group hive_test_group1
+POSTHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: Output: default@authorization_part
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part
+POSTHOOK: type: SHOW_GRANT
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+default	authorization_part	[2010]		hive_test_group1	GROUP	SELECT	false	-1	hive_test_user
+PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@authorization_part
+PREHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Input: default@authorization_part@ds=2010
+#### A masked pattern was here ####
+0
+0
+0
+2
+4
+5
+5
+5
+8
+9
+10
+11
+12
+12
+15
+15
+17
+18
+18
+19
+PREHOOK: query: revoke select on table authorization_part partition (ds='2010') from group hive_test_group1
+PREHOOK: type: REVOKE_PRIVILEGE
+PREHOOK: Output: default@authorization_part@ds=2010
+POSTHOOK: query: revoke select on table authorization_part partition (ds='2010') from group hive_test_group1
+POSTHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: Output: default@authorization_part@ds=2010
+PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010')
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010')
+POSTHOOK: type: SHOW_GRANT
+PREHOOK: query: revoke select on table src_auth_tmp from user hive_test_user
+PREHOOK: type: REVOKE_PRIVILEGE
+PREHOOK: Output: default@src_auth_tmp
+POSTHOOK: query: revoke select on table src_auth_tmp from user hive_test_user
+POSTHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: Output: default@src_auth_tmp
+PREHOOK: query: drop table authorization_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@authorization_part
+PREHOOK: Output: default@authorization_part
+POSTHOOK: query: drop table authorization_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@authorization_part
+POSTHOOK: Output: default@authorization_part


[14/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/orc_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/llap/orc_predicate_pushdown.q.out
new file mode 100644
index 0000000..db0baee
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_predicate_pushdown.q.out
@@ -0,0 +1,1184 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE orc_pred(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_pred
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE orc_pred(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_pred
+PREHOOK: query: ALTER TABLE orc_pred SET SERDEPROPERTIES ('orc.row.index.stride' = '1000')
+PREHOOK: type: ALTERTABLE_SERDEPROPERTIES
+PREHOOK: Input: default@orc_pred
+PREHOOK: Output: default@orc_pred
+POSTHOOK: query: ALTER TABLE orc_pred SET SERDEPROPERTIES ('orc.row.index.stride' = '1000')
+POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES
+POSTHOOK: Input: default@orc_pred
+POSTHOOK: Output: default@orc_pred
+PREHOOK: query: CREATE TABLE staging(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@staging
+POSTHOOK: query: CREATE TABLE staging(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@staging
+PREHOOK: query: INSERT INTO TABLE orc_pred select * from staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@orc_pred
+POSTHOOK: query: INSERT INTO TABLE orc_pred select * from staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@orc_pred
+POSTHOOK: Lineage: orc_pred.b SIMPLE [(staging)staging.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_pred.bin SIMPLE [(staging)staging.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: orc_pred.bo SIMPLE [(staging)staging.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_pred.d SIMPLE [(staging)staging.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_pred.dec SIMPLE [(staging)staging.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: orc_pred.f SIMPLE [(staging)staging.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_pred.i SIMPLE [(staging)staging.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_pred.s SIMPLE [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_pred.si SIMPLE [(staging)staging.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_pred.t SIMPLE [(staging)staging.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: orc_pred.ts SIMPLE [(staging)staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+PREHOOK: query: -- no predicate case. the explain plan should not have filter expression in table scan operator
+
+SELECT SUM(HASH(t)) FROM orc_pred
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: -- no predicate case. the explain plan should not have filter expression in table scan operator
+
+SELECT SUM(HASH(t)) FROM orc_pred
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+62430
+PREHOOK: query: SELECT SUM(HASH(t)) FROM orc_pred
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(t)) FROM orc_pred
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+62430
+PREHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM orc_pred
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM orc_pred
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_pred
+                  Statistics: Num rows: 6037 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hash(t) (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6037 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: sum(_col0)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM orc_pred
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM orc_pred
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_pred
+                  Statistics: Num rows: 6037 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hash(t) (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6037 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: sum(_col0)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- all the following queries have predicates which are pushed down to table scan operator if
+-- hive.optimize.index.filter is set to true. the explain plan should show filter expression
+-- in table scan operator.
+
+SELECT * FROM orc_pred WHERE t<2 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: -- all the following queries have predicates which are pushed down to table scan operator if
+-- hive.optimize.index.filter is set to true. the explain plan should show filter expression
+-- in table scan operator.
+
+SELECT * FROM orc_pred WHERE t<2 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+-3	467	65575	4294967437	81.64	23.53	true	tom hernandez	2013-03-01 09:11:58.703188	32.85	study skills
+PREHOOK: query: SELECT * FROM orc_pred WHERE t<2 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM orc_pred WHERE t<2 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+-3	467	65575	4294967437	81.64	23.53	true	tom hernandez	2013-03-01 09:11:58.703188	32.85	study skills
+PREHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
+PREHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
+PREHOOK: query: SELECT SUM(HASH(t)) FROM orc_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(t)) FROM orc_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+-8
+PREHOOK: query: SELECT SUM(HASH(t)) FROM orc_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(t)) FROM orc_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+-8
+PREHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM orc_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM orc_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_pred
+                  Statistics: Num rows: 6037 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((t < 0) and (UDFToInteger(t) > -2)) (type: boolean)
+                    Statistics: Num rows: 670 Data size: 2680 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: hash(t) (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 670 Data size: 2680 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: sum(_col0)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM orc_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM orc_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_pred
+                  filterExpr: ((t < 0) and (UDFToInteger(t) > -2)) (type: boolean)
+                  Statistics: Num rows: 6037 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((t < 0) and (UDFToInteger(t) > -2)) (type: boolean)
+                    Statistics: Num rows: 670 Data size: 2680 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: hash(t) (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 670 Data size: 2680 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: sum(_col0)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT t, s FROM orc_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, s FROM orc_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+-1	bob laertes
+-1	bob young
+PREHOOK: query: SELECT t, s FROM orc_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, s FROM orc_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+-1	bob laertes
+-1	bob young
+PREHOOK: query: EXPLAIN SELECT t, s FROM orc_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, s FROM orc_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: orc_pred
+          Filter Operator
+            predicate: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
+            Select Operator
+              expressions: -1 (type: tinyint), s (type: string)
+              outputColumnNames: _col0, _col1
+              ListSink
+
+PREHOOK: query: EXPLAIN SELECT t, s FROM orc_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, s FROM orc_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: orc_pred
+          filterExpr: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
+          Filter Operator
+            predicate: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
+            Select Operator
+              expressions: -1 (type: tinyint), s (type: string)
+              outputColumnNames: _col0, _col1
+              ListSink
+
+PREHOOK: query: SELECT t, s FROM orc_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, s FROM orc_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+26	bob ovid
+26	bob quirinius
+27	bob ovid
+PREHOOK: query: SELECT t, s FROM orc_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, s FROM orc_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+26	bob ovid
+26	bob quirinius
+27	bob ovid
+PREHOOK: query: EXPLAIN SELECT t, s FROM orc_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, s FROM orc_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_pred
+                  Statistics: Num rows: 232 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
+                    Statistics: Num rows: 29 Data size: 3018 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), s (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 29 Data size: 3018 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 29 Data size: 3018 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 29 Data size: 3018 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 29 Data size: 3018 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT t, s FROM orc_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, s FROM orc_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_pred
+                  filterExpr: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
+                  Statistics: Num rows: 232 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
+                    Statistics: Num rows: 29 Data size: 3018 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), s (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 29 Data size: 3018 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 29 Data size: 3018 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 29 Data size: 3018 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 29 Data size: 3018 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT t, si, d, s FROM orc_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, si, d, s FROM orc_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+101	327	11.48	gabriella ellison
+15	334	11.12	jessica robinson
+7	320	11.54	bob ellison
+PREHOOK: query: SELECT t, si, d, s FROM orc_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, si, d, s FROM orc_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+101	327	11.48	gabriella ellison
+15	334	11.12	jessica robinson
+7	320	11.54	bob ellison
+PREHOOK: query: EXPLAIN SELECT t, si, d, s FROM orc_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, si, d, s FROM orc_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_pred
+                  Statistics: Num rows: 208 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col3 (type: string)
+                        sort order: -
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT t, si, d, s FROM orc_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, si, d, s FROM orc_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_pred
+                  filterExpr: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
+                  Statistics: Num rows: 208 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col3 (type: string)
+                        sort order: -
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT t, si, d, s FROM orc_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, si, d, s FROM orc_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+15	334	11.12	jessica robinson
+PREHOOK: query: SELECT t, si, d, s FROM orc_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, si, d, s FROM orc_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_pred
+#### A masked pattern was here ####
+15	334	11.12	jessica robinson
+PREHOOK: query: EXPLAIN SELECT t, si, d, s FROM orc_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, si, d, s FROM orc_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_pred
+                  Statistics: Num rows: 208 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((t > 10) and (t <> 101) and (d >= 10) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col3 (type: string)
+                        sort order: -
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col3 (type: string)
+                    sort order: -
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT t, si, d, s FROM orc_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, si, d, s FROM orc_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_pred
+                  filterExpr: ((t > 10) and (t <> 101) and (d >= 10) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
+                  Statistics: Num rows: 208 Data size: 24150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((t > 10) and (t <> 101) and (d >= 10) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col3 (type: string)
+                        sort order: -
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+            Execution mode: llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col3 (type: string)
+                    sort order: -
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/orc_split_elimination.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_split_elimination.q.out b/ql/src/test/results/clientpositive/llap/orc_split_elimination.q.out
new file mode 100644
index 0000000..7134ff5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_split_elimination.q.out
@@ -0,0 +1,532 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_split_elim
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_split_elim
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_split_elim
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_split_elim
+PREHOOK: query: -- The above table will have 5 splits with the followings stats
+--  Stripe 1:
+--    Column 0: count: 5000
+--    Column 1: count: 5000 min: 2 max: 100 sum: 499902
+--    Column 2: count: 5000 min: foo max: zebra sum: 24998
+--    Column 3: count: 5000 min: 0.8 max: 8.0 sum: 39992.8
+--    Column 4: count: 5000 min: 0 max: 1.2 sum: 1.2
+--    Column 5: count: 5000
+--  Stripe 2:
+--    Column 0: count: 5000
+--    Column 1: count: 5000 min: 13 max: 100 sum: 499913
+--    Column 2: count: 5000 min: bar max: zebra sum: 24998
+--    Column 3: count: 5000 min: 8.0 max: 80.0 sum: 40072.0
+--    Column 4: count: 5000 min: 0 max: 2.2 sum: 2.2
+--    Column 5: count: 5000
+--  Stripe 3:
+--    Column 0: count: 5000
+--    Column 1: count: 5000 min: 29 max: 100 sum: 499929
+--    Column 2: count: 5000 min: cat max: zebra sum: 24998
+--    Column 3: count: 5000 min: 8.0 max: 8.0 sum: 40000.0
+--    Column 4: count: 5000 min: 0 max: 3.3 sum: 3.3
+--    Column 5: count: 5000
+--  Stripe 4:
+--    Column 0: count: 5000
+--    Column 1: count: 5000 min: 70 max: 100 sum: 499970
+--    Column 2: count: 5000 min: dog max: zebra sum: 24998
+--    Column 3: count: 5000 min: 1.8 max: 8.0 sum: 39993.8
+--    Column 4: count: 5000 min: 0 max: 4.4 sum: 4.4
+--    Column 5: count: 5000
+--  Stripe 5:
+--    Column 0: count: 5000
+--    Column 1: count: 5000 min: 5 max: 100 sum: 499905
+--    Column 2: count: 5000 min: eat max: zebra sum: 24998
+--    Column 3: count: 5000 min: 0.8 max: 8.0 sum: 39992.8
+--    Column 4: count: 5000 min: 0 max: 5.5 sum: 5.5
+--    Column 5: count: 5000
+
+-- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- The above table will have 5 splits with the followings stats
+--  Stripe 1:
+--    Column 0: count: 5000
+--    Column 1: count: 5000 min: 2 max: 100 sum: 499902
+--    Column 2: count: 5000 min: foo max: zebra sum: 24998
+--    Column 3: count: 5000 min: 0.8 max: 8.0 sum: 39992.8
+--    Column 4: count: 5000 min: 0 max: 1.2 sum: 1.2
+--    Column 5: count: 5000
+--  Stripe 2:
+--    Column 0: count: 5000
+--    Column 1: count: 5000 min: 13 max: 100 sum: 499913
+--    Column 2: count: 5000 min: bar max: zebra sum: 24998
+--    Column 3: count: 5000 min: 8.0 max: 80.0 sum: 40072.0
+--    Column 4: count: 5000 min: 0 max: 2.2 sum: 2.2
+--    Column 5: count: 5000
+--  Stripe 3:
+--    Column 0: count: 5000
+--    Column 1: count: 5000 min: 29 max: 100 sum: 499929
+--    Column 2: count: 5000 min: cat max: zebra sum: 24998
+--    Column 3: count: 5000 min: 8.0 max: 8.0 sum: 40000.0
+--    Column 4: count: 5000 min: 0 max: 3.3 sum: 3.3
+--    Column 5: count: 5000
+--  Stripe 4:
+--    Column 0: count: 5000
+--    Column 1: count: 5000 min: 70 max: 100 sum: 499970
+--    Column 2: count: 5000 min: dog max: zebra sum: 24998
+--    Column 3: count: 5000 min: 1.8 max: 8.0 sum: 39993.8
+--    Column 4: count: 5000 min: 0 max: 4.4 sum: 4.4
+--    Column 5: count: 5000
+--  Stripe 5:
+--    Column 0: count: 5000
+--    Column 1: count: 5000 min: 5 max: 100 sum: 499905
+--    Column 2: count: 5000 min: eat max: zebra sum: 24998
+--    Column 3: count: 5000 min: 0.8 max: 8.0 sum: 39992.8
+--    Column 4: count: 5000 min: 0 max: 5.5 sum: 5.5
+--    Column 5: count: 5000
+
+-- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+PREHOOK: query: -- 0 mapper
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 0 mapper
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+PREHOOK: query: -- 5 mappers. count should be 0
+select count(*) from orc_split_elim where userid<=0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5 mappers. count should be 0
+select count(*) from orc_split_elim where userid<=0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+0
+PREHOOK: query: -- 0 mapper
+select count(*) from orc_split_elim where userid<=0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 0 mapper
+select count(*) from orc_split_elim where userid<=0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+0
+PREHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+PREHOOK: query: -- 1 mapper
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 1 mapper
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+PREHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 2 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 2 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+13	bar	80.0	2	1969-12-31 16:00:05
+2	foo	0.8	1	1969-12-31 16:00:00
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 3 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 3 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+13	bar	80.0	2	1969-12-31 16:00:05
+2	foo	0.8	1	1969-12-31 16:00:00
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=29
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=29
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+13	bar	80.0	2	1969-12-31 16:00:05
+2	foo	0.8	1	1969-12-31 16:00:00
+29	cat	8.0	3	1969-12-31 16:00:10
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 4 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=29
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 4 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=29
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+13	bar	80.0	2	1969-12-31 16:00:05
+2	foo	0.8	1	1969-12-31 16:00:00
+29	cat	8.0	3	1969-12-31 16:00:10
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+13	bar	80.0	2	1969-12-31 16:00:05
+2	foo	0.8	1	1969-12-31 16:00:00
+29	cat	8.0	3	1969-12-31 16:00:10
+5	eat	0.8	6	1969-12-31 16:00:20
+70	dog	1.8	4	1969-12-31 16:00:15
+PREHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim
+#### A masked pattern was here ####
+13	bar	80.0	2	1969-12-31 16:00:05
+2	foo	0.8	1	1969-12-31 16:00:00
+29	cat	8.0	3	1969-12-31 16:00:10
+5	eat	0.8	6	1969-12-31 16:00:20
+70	dog	1.8	4	1969-12-31 16:00:15
+PREHOOK: query: -- partitioned table
+create table orc_split_elim_part (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (country string, year int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_split_elim_part
+POSTHOOK: query: -- partitioned table
+create table orc_split_elim_part (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (country string, year int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_split_elim_part
+PREHOOK: query: alter table orc_split_elim_part add partition(country='us', year=2000)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@orc_split_elim_part
+POSTHOOK: query: alter table orc_split_elim_part add partition(country='us', year=2000)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@orc_split_elim_part
+POSTHOOK: Output: default@orc_split_elim_part@country=us/year=2000
+PREHOOK: query: alter table orc_split_elim_part add partition(country='us', year=2001)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@orc_split_elim_part
+POSTHOOK: query: alter table orc_split_elim_part add partition(country='us', year=2001)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@orc_split_elim_part
+POSTHOOK: Output: default@orc_split_elim_part@country=us/year=2001
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_part partition(country='us', year=2000)
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_split_elim_part@country=us/year=2000
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_part partition(country='us', year=2000)
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_split_elim_part@country=us/year=2000
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_part partition(country='us', year=2001)
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_split_elim_part@country=us/year=2001
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_part partition(country='us', year=2001)
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_split_elim_part@country=us/year=2001
+PREHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+POSTHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+2	foo	0.8	1	1969-12-31 16:00:00
+PREHOOK: query: -- 2 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+POSTHOOK: query: -- 2 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+2	foo	0.8	1	1969-12-31 16:00:00
+PREHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and (year=2000 or year=2001)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+POSTHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and (year=2000 or year=2001)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+2	foo	0.8	1	1969-12-31 16:00:00
+PREHOOK: query: -- 2 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and (year=2000 or year=2001)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+POSTHOOK: query: -- 2 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and (year=2000 or year=2001)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+2	foo	0.8	1	1969-12-31 16:00:00
+PREHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and year=2000
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+#### A masked pattern was here ####
+POSTHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and year=2000
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+PREHOOK: query: -- 1 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and year=2000
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+#### A masked pattern was here ####
+POSTHOOK: query: -- 1 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and year=2000
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+PREHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+POSTHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+2	foo	0.8	1	1969-12-31 16:00:00
+5	eat	0.8	6	1969-12-31 16:00:20
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 4 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+POSTHOOK: query: -- 4 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+2	foo	0.8	1	1969-12-31 16:00:00
+5	eat	0.8	6	1969-12-31 16:00:20
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and (year=2000 or year=2001)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+POSTHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and (year=2000 or year=2001)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+2	foo	0.8	1	1969-12-31 16:00:00
+5	eat	0.8	6	1969-12-31 16:00:20
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 4 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and (year=2000 or year=2001)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+POSTHOOK: query: -- 4 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and (year=2000 or year=2001)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2001
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+2	foo	0.8	1	1969-12-31 16:00:00
+5	eat	0.8	6	1969-12-31 16:00:20
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and year=2000
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+#### A masked pattern was here ####
+POSTHOOK: query: -- 10 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and year=2000
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 2 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and year=2000
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+PREHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+#### A masked pattern was here ####
+POSTHOOK: query: -- 2 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and year=2000
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+POSTHOOK: Input: default@orc_split_elim_part@country=us/year=2000
+#### A masked pattern was here ####
+2	foo	0.8	1	1969-12-31 16:00:00
+5	eat	0.8	6	1969-12-31 16:00:20
+PREHOOK: query: -- 0 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='in'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 0 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='in'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+#### A masked pattern was here ####
+PREHOOK: query: select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='us' and year=2002
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+#### A masked pattern was here ####
+POSTHOOK: query: select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='us' and year=2002
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+#### A masked pattern was here ####
+PREHOOK: query: -- 0 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='in'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+#### A masked pattern was here ####
+POSTHOOK: query: -- 0 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='in'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+#### A masked pattern was here ####
+PREHOOK: query: select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='us' and year=2002
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_split_elim_part
+#### A masked pattern was here ####
+POSTHOOK: query: select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='us' and year=2002
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_split_elim_part
+#### A masked pattern was here ####


[13/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/parquet_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/parquet_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/llap/parquet_predicate_pushdown.q.out
new file mode 100644
index 0000000..6541772
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/parquet_predicate_pushdown.q.out
@@ -0,0 +1,1277 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+CREATE TABLE tbl_pred(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS PARQUET
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_pred
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+CREATE TABLE tbl_pred(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS PARQUET
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_pred
+PREHOOK: query: CREATE TABLE staging(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@staging
+POSTHOOK: query: CREATE TABLE staging(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@staging
+PREHOOK: query: INSERT INTO TABLE tbl_pred select * from staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@tbl_pred
+POSTHOOK: query: INSERT INTO TABLE tbl_pred select * from staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@tbl_pred
+POSTHOOK: Lineage: tbl_pred.b SIMPLE [(staging)staging.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: tbl_pred.bin SIMPLE [(staging)staging.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: tbl_pred.bo SIMPLE [(staging)staging.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: tbl_pred.d SIMPLE [(staging)staging.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: tbl_pred.dec SIMPLE [(staging)staging.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: tbl_pred.f SIMPLE [(staging)staging.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: tbl_pred.i SIMPLE [(staging)staging.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: tbl_pred.s SIMPLE [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: tbl_pred.si SIMPLE [(staging)staging.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: tbl_pred.t SIMPLE [(staging)staging.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: tbl_pred.ts SIMPLE [(staging)staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+PREHOOK: query: -- no predicate case. the explain plan should not have filter expression in table scan operator
+
+SELECT SUM(HASH(t)) FROM tbl_pred
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: -- no predicate case. the explain plan should not have filter expression in table scan operator
+
+SELECT SUM(HASH(t)) FROM tbl_pred
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+62430
+PREHOOK: query: SELECT SUM(HASH(t)) FROM tbl_pred
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(t)) FROM tbl_pred
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+62430
+PREHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM tbl_pred
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM tbl_pred
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tbl_pred
+                  Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hash(t) (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: sum(_col0)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM tbl_pred
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT SUM(HASH(t)) FROM tbl_pred
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tbl_pred
+                  Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hash(t) (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: sum(_col0)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- all the following queries have predicates which are pushed down to table scan operator if
+-- hive.optimize.index.filter is set to true. the explain plan should show filter expression
+-- in table scan operator.
+
+SELECT * FROM tbl_pred WHERE t<2 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: -- all the following queries have predicates which are pushed down to table scan operator if
+-- hive.optimize.index.filter is set to true. the explain plan should show filter expression
+-- in table scan operator.
+
+SELECT * FROM tbl_pred WHERE t<2 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+-3	467	65575	4294967437	81.64	23.53	true	tom hernandez	2013-03-01 09:11:58.703188	32.85	study skills
+PREHOOK: query: SELECT * FROM tbl_pred WHERE t<2 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM tbl_pred WHERE t<2 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+-3	467	65575	4294967437	81.64	23.53	true	tom hernandez	2013-03-01 09:11:58.703188	32.85	study skills
+PREHOOK: query: SELECT * FROM tbl_pred WHERE t>2 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM tbl_pred WHERE t>2 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
+PREHOOK: query: SELECT * FROM tbl_pred WHERE t>2 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM tbl_pred WHERE t>2 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+124	336	65664	4294967435	74.72	42.47	true	bob davidson	2013-03-01 09:11:58.703302	45.40	yard duty
+PREHOOK: query: SELECT * FROM tbl_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+  LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM tbl_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+  LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+-1	268	65778	4294967418	56.33	44.73	true	calvin falkner	2013-03-01 09:11:58.70322	7.37	history
+-1	281	65643	4294967323	15.1	45.0	false	irene nixon	2013-03-01 09:11:58.703223	80.96	undecided
+-1	300	65663	4294967343	71.26	34.62	true	calvin ovid	2013-03-01 09:11:58.703262	78.56	study skills
+-1	348	65556	4294967413	35.17	9.51	false	bob young	2013-03-01 09:11:58.70328	45.81	quiet hour
+-1	372	65680	4294967490	15.45	18.09	false	ethan laertes	2013-03-01 09:11:58.70311	65.88	opthamology
+-1	417	65685	4294967492	28.89	5.19	true	mike white	2013-03-01 09:11:58.703275	90.69	forestry
+-1	423	65663	4294967380	0.79	21.33	false	bob laertes	2013-03-01 09:11:58.703278	94.16	debate
+-1	433	65581	4294967299	86.92	23.15	false	yuri ellison	2013-03-01 09:11:58.703098	21.29	history
+PREHOOK: query: SELECT * FROM tbl_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+  LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM tbl_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+  LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+-1	268	65778	4294967418	56.33	44.73	true	calvin falkner	2013-03-01 09:11:58.70322	7.37	history
+-1	281	65643	4294967323	15.1	45.0	false	irene nixon	2013-03-01 09:11:58.703223	80.96	undecided
+-1	300	65663	4294967343	71.26	34.62	true	calvin ovid	2013-03-01 09:11:58.703262	78.56	study skills
+-1	348	65556	4294967413	35.17	9.51	false	bob young	2013-03-01 09:11:58.70328	45.81	quiet hour
+-1	372	65680	4294967490	15.45	18.09	false	ethan laertes	2013-03-01 09:11:58.70311	65.88	opthamology
+-1	417	65685	4294967492	28.89	5.19	true	mike white	2013-03-01 09:11:58.703275	90.69	forestry
+-1	423	65663	4294967380	0.79	21.33	false	bob laertes	2013-03-01 09:11:58.703278	94.16	debate
+-1	433	65581	4294967299	86.92	23.15	false	yuri ellison	2013-03-01 09:11:58.703098	21.29	history
+PREHOOK: query: EXPLAIN SELECT * FROM tbl_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+  LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * FROM tbl_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+  LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        TableScan
+          alias: tbl_pred
+          Filter Operator
+            predicate: ((t < 0) and (UDFToInteger(t) > -2)) (type: boolean)
+            Select Operator
+              expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+              Limit
+                Number of rows: 10
+                ListSink
+
+PREHOOK: query: EXPLAIN SELECT * FROM tbl_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+  LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * FROM tbl_pred
+  WHERE t IS NOT NULL
+  AND t < 0
+  AND t > -2
+  LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        TableScan
+          alias: tbl_pred
+          filterExpr: ((t < 0) and (UDFToInteger(t) > -2)) (type: boolean)
+          Filter Operator
+            predicate: ((t < 0) and (UDFToInteger(t) > -2)) (type: boolean)
+            Select Operator
+              expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+              Limit
+                Number of rows: 10
+                ListSink
+
+PREHOOK: query: SELECT t, s FROM tbl_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, s FROM tbl_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+-1	bob laertes
+-1	bob young
+PREHOOK: query: SELECT t, s FROM tbl_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, s FROM tbl_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+-1	bob laertes
+-1	bob young
+PREHOOK: query: EXPLAIN SELECT t, s FROM tbl_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, s FROM tbl_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: tbl_pred
+          Filter Operator
+            predicate: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
+            Select Operator
+              expressions: -1 (type: tinyint), s (type: string)
+              outputColumnNames: _col0, _col1
+              ListSink
+
+PREHOOK: query: EXPLAIN SELECT t, s FROM tbl_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, s FROM tbl_pred
+  WHERE t <=> -1
+  AND s IS NOT NULL
+  AND s LIKE 'bob%'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: tbl_pred
+          filterExpr: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
+          Filter Operator
+            predicate: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
+            Select Operator
+              expressions: -1 (type: tinyint), s (type: string)
+              outputColumnNames: _col0, _col1
+              ListSink
+
+PREHOOK: query: SELECT t, s FROM tbl_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, s FROM tbl_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+26	bob ovid
+26	bob quirinius
+27	bob ovid
+PREHOOK: query: SELECT t, s FROM tbl_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, s FROM tbl_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+26	bob ovid
+26	bob quirinius
+27	bob ovid
+PREHOOK: query: EXPLAIN SELECT t, s FROM tbl_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, s FROM tbl_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tbl_pred
+                  Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
+                    Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), s (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT t, s FROM tbl_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, s FROM tbl_pred
+  WHERE s IS NOT NULL
+  AND s LIKE 'bob%'
+  AND t NOT IN (-1,-2,-3)
+  AND t BETWEEN 25 AND 30
+  SORT BY t,s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tbl_pred
+                  filterExpr: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
+                  Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
+                    Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), s (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT t, si, d, s FROM tbl_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, si, d, s FROM tbl_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+101	327	11.48	gabriella ellison
+15	334	11.12	jessica robinson
+7	320	11.54	bob ellison
+PREHOOK: query: SELECT t, si, d, s FROM tbl_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, si, d, s FROM tbl_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+101	327	11.48	gabriella ellison
+15	334	11.12	jessica robinson
+7	320	11.54	bob ellison
+PREHOOK: query: EXPLAIN SELECT t, si, d, s FROM tbl_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, si, d, s FROM tbl_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tbl_pred
+                  Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col3 (type: string)
+                        sort order: -
+                        Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 3 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 3 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT t, si, d, s FROM tbl_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, si, d, s FROM tbl_pred
+  WHERE d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  ORDER BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tbl_pred
+                  filterExpr: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
+                  Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col3 (type: string)
+                        sort order: -
+                        Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 3 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 3 Data size: 33 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT t, si, d, s FROM tbl_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, si, d, s FROM tbl_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+15	334	11.12	jessica robinson
+PREHOOK: query: SELECT t, si, d, s FROM tbl_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, si, d, s FROM tbl_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+15	334	11.12	jessica robinson
+PREHOOK: query: SELECT f, i, b FROM tbl_pred
+  WHERE f IS NOT NULL
+  AND f < 123.2
+  AND f > 1.92
+  AND f >= 9.99
+  AND f BETWEEN 1.92 AND 123.2
+  AND i IS NOT NULL
+  AND i < 67627
+  AND i > 60627
+  AND i >= 60626
+  AND i BETWEEN 60626 AND 67627
+  AND b IS NOT NULL
+  AND b < 4294967861
+  AND b > 4294967261
+  AND b >= 4294967260
+  AND b BETWEEN 4294967261 AND 4294967861
+  SORT BY f DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT f, i, b FROM tbl_pred
+  WHERE f IS NOT NULL
+  AND f < 123.2
+  AND f > 1.92
+  AND f >= 9.99
+  AND f BETWEEN 1.92 AND 123.2
+  AND i IS NOT NULL
+  AND i < 67627
+  AND i > 60627
+  AND i >= 60626
+  AND i BETWEEN 60626 AND 67627
+  AND b IS NOT NULL
+  AND b < 4294967861
+  AND b > 4294967261
+  AND b >= 4294967260
+  AND b BETWEEN 4294967261 AND 4294967861
+  SORT BY f DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_pred
+#### A masked pattern was here ####
+99.68	65658	4294967503
+99.91	65763	4294967324
+99.92	65661	4294967404
+PREHOOK: query: EXPLAIN SELECT t, si, d, s FROM tbl_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, si, d, s FROM tbl_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tbl_pred
+                  Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((t > 10) and (t <> 101) and (d >= 10) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col3 (type: string)
+                        sort order: -
+                        Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col3 (type: string)
+                    sort order: -
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT t, si, d, s FROM tbl_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT t, si, d, s FROM tbl_pred
+  WHERE t > 10
+  AND t <> 101
+  AND d >= ROUND(9.99)
+  AND d < 12
+  AND t IS NOT NULL
+  AND s LIKE '%son'
+  AND s NOT LIKE '%car%'
+  AND t > 0
+  AND si BETWEEN 300 AND 400
+  SORT BY s DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tbl_pred
+                  filterExpr: ((t > 10) and (t <> 101) and (d >= 10) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
+                  Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((t > 10) and (t <> 101) and (d >= 10) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col3 (type: string)
+                        sort order: -
+                        Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col3 (type: string)
+                    sort order: -
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT f, i, b FROM tbl_pred
+  WHERE f IS NOT NULL
+  AND f < 123.2
+  AND f > 1.92
+  AND f >= 9.99
+  AND f BETWEEN 1.92 AND 123.2
+  AND i IS NOT NULL
+  AND i < 67627
+  AND i > 60627
+  AND i >= 60626
+  AND i BETWEEN 60626 AND 67627
+  AND b IS NOT NULL
+  AND b < 4294967861
+  AND b > 4294967261
+  AND b >= 4294967260
+  AND b BETWEEN 4294967261 AND 4294967861
+  SORT BY f DESC
+  LIMIT 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT f, i, b FROM tbl_pred
+  WHERE f IS NOT NULL
+  AND f < 123.2
+  AND f > 1.92
+  AND f >= 9.99
+  AND f BETWEEN 1.92 AND 123.2
+  AND i IS NOT NULL
+  AND i < 67627
+  AND i > 60627
+  AND i >= 60626
+  AND i BETWEEN 60626 AND 67627
+  AND b IS NOT NULL
+  AND b < 4294967861
+  AND b > 4294967261
+  AND b >= 4294967260
+  AND b BETWEEN 4294967261 AND 4294967861
+  SORT BY f DESC
+  LIMIT 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tbl_pred
+                  filterExpr: ((f < 123.2) and (f > 1.92) and (f >= 9.99) and f BETWEEN 1.92 AND 123.2 and (i < 67627) and (i > 60627) and (i >= 60626) and i BETWEEN 60626 AND 67627 and (b < 4294967861) and (b > 4294967261) and (b >= 4294967260) and b BETWEEN 4294967261 AND 4294967861) (type: boolean)
+                  Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((f < 123.2) and (f > 1.92) and (f >= 9.99) and f BETWEEN 1.92 AND 123.2 and (i < 67627) and (i > 60627) and (i >= 60626) and i BETWEEN 60626 AND 67627 and (b < 4294967861) and (b > 4294967261) and (b >= 4294967260) and b BETWEEN 4294967261 AND 4294967861) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: f (type: float), i (type: int), b (type: bigint)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: float)
+                        sort order: -
+                        Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                        TopN Hash Memory Usage: 0.1
+                        value expressions: _col1 (type: int), _col2 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: float), VALUE._col0 (type: int), VALUE._col1 (type: bigint)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: float)
+                    sort order: -
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: int), _col2 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: float), VALUE._col0 (type: int), VALUE._col1 (type: bigint)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 3
+                  Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/parquet_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/parquet_types.q.out b/ql/src/test/results/clientpositive/llap/parquet_types.q.out
new file mode 100644
index 0000000..ad743ef
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/parquet_types.q.out
@@ -0,0 +1,379 @@
+PREHOOK: query: DROP TABLE parquet_types_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE parquet_types_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE parquet_types
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE parquet_types
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE parquet_types_staging (
+  cint int,
+  ctinyint tinyint,
+  csmallint smallint,
+  cfloat float,
+  cdouble double,
+  cstring1 string,
+  t timestamp,
+  cchar char(5),
+  cvarchar varchar(10),
+  cbinary string,
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>,
+  d date
+) ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_types_staging
+POSTHOOK: query: CREATE TABLE parquet_types_staging (
+  cint int,
+  ctinyint tinyint,
+  csmallint smallint,
+  cfloat float,
+  cdouble double,
+  cstring1 string,
+  t timestamp,
+  cchar char(5),
+  cvarchar varchar(10),
+  cbinary string,
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>,
+  d date
+) ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_types_staging
+PREHOOK: query: CREATE TABLE parquet_types (
+  cint int,
+  ctinyint tinyint,
+  csmallint smallint,
+  cfloat float,
+  cdouble double,
+  cstring1 string,
+  t timestamp,
+  cchar char(5),
+  cvarchar varchar(10),
+  cbinary binary,
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>,
+  d date
+) STORED AS PARQUET
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_types
+POSTHOOK: query: CREATE TABLE parquet_types (
+  cint int,
+  ctinyint tinyint,
+  csmallint smallint,
+  cfloat float,
+  cdouble double,
+  cstring1 string,
+  t timestamp,
+  cchar char(5),
+  cvarchar varchar(10),
+  cbinary binary,
+  m1 map<string, varchar(3)>,
+  l1 array<int>,
+  st1 struct<c1:int, c2:char(1)>,
+  d date
+) STORED AS PARQUET
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_types
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@parquet_types_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@parquet_types_staging
+PREHOOK: query: SELECT * FROM parquet_types_staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types_staging
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM parquet_types_staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types_staging
+#### A masked pattern was here ####
+100	1	1	1.0	0.0	abc	2011-01-01 01:01:01.111111111	a    	a  	B4F3CAFDBEDD	{"k1":"v1"}	[101,200]	{"c1":10,"c2":"a"}	2011-01-01
+101	2	2	1.1	0.3	def	2012-02-02 02:02:02.222222222	ab   	ab 	68692CCAC0BDE7	{"k2":"v2"}	[102,200]	{"c1":10,"c2":"d"}	2012-02-02
+102	3	3	1.2	0.6	ghi	2013-03-03 03:03:03.333333333	abc  	abc	B4F3CAFDBEDD	{"k3":"v3"}	[103,200]	{"c1":10,"c2":"g"}	2013-03-03
+103	1	4	1.3	0.9	jkl	2014-04-04 04:04:04.444444444	abcd 	abcd	68692CCAC0BDE7	{"k4":"v4"}	[104,200]	{"c1":10,"c2":"j"}	2014-04-04
+104	2	5	1.4	1.2	mno	2015-05-05 05:05:05.555555555	abcde	abcde	B4F3CAFDBEDD	{"k5":"v5"}	[105,200]	{"c1":10,"c2":"m"}	2015-05-05
+105	3	1	1.0	1.5	pqr	2016-06-06 06:06:06.666666666	abcde	abcdef	68692CCAC0BDE7	{"k6":"v6"}	[106,200]	{"c1":10,"c2":"p"}	2016-06-06
+106	1	2	1.1	1.8	stu	2017-07-07 07:07:07.777777777	abcde	abcdefg	B4F3CAFDBEDD	{"k7":"v7"}	[107,200]	{"c1":10,"c2":"s"}	2017-07-07
+107	2	3	1.2	2.1	vwx	2018-08-08 08:08:08.888888888	bcdef	abcdefgh	68692CCAC0BDE7	{"k8":"v8"}	[108,200]	{"c1":10,"c2":"v"}	2018-08-08
+108	3	4	1.3	2.4	yza	2019-09-09 09:09:09.999999999	cdefg	B4F3CAFDBE	68656C6C6F	{"k9":"v9"}	[109,200]	{"c1":10,"c2":"y"}	2019-09-09
+109	1	5	1.4	2.7	bcd	2020-10-10 10:10:10.101010101	klmno	abcdedef	68692CCAC0BDE7	{"k10":"v10"}	[110,200]	{"c1":10,"c2":"b"}	2020-10-10
+110	2	1	1.0	3.0	efg	2021-11-11 11:11:11.111111111	pqrst	abcdede	B4F3CAFDBEDD	{"k11":"v11"}	[111,200]	{"c1":10,"c2":"e"}	2021-11-11
+111	3	2	1.1	3.3	hij	2022-12-12 12:12:12.121212121	nopqr	abcded	68692CCAC0BDE7	{"k12":"v12"}	[112,200]	{"c1":10,"c2":"h"}	2022-12-12
+112	1	3	1.2	3.6	klm	2023-01-02 13:13:13.131313131	opqrs	abcdd	B4F3CAFDBEDD	{"k13":"v13"}	[113,200]	{"c1":10,"c2":"k"}	2023-01-02
+113	2	4	1.3	3.9	nop	2024-02-02 14:14:14.141414141	pqrst	abc	68692CCAC0BDE7	{"k14":"v14"}	[114,200]	{"c1":10,"c2":"n"}	2024-02-02
+114	3	5	1.4	4.2	qrs	2025-03-03 15:15:15.151515151	qrstu	b	B4F3CAFDBEDD	{"k15":"v15"}	[115,200]	{"c1":10,"c2":"q"}	2025-03-03
+115	1	1	1.0	4.5	qrs	2026-04-04 16:16:16.161616161	rstuv	abcded	68692CCAC0BDE7	{"k16":"v16"}	[116,200]	{"c1":10,"c2":"q"}	2026-04-04
+116	2	2	1.1	4.8	wxy	2027-05-05 17:17:17.171717171	stuvw	abcded	B4F3CAFDBEDD	{"k17":"v17"}	[117,200]	{"c1":10,"c2":"w"}	2027-05-05
+117	3	3	1.2	5.1	zab	2028-06-06 18:18:18.181818181	tuvwx	abcded	68692CCAC0BDE7	{"k18":"v18"}	[118,200]	{"c1":10,"c2":"z"}	2028-06-06
+118	1	4	1.3	5.4	cde	2029-07-07 19:19:19.191919191	uvwzy	abcdede	B4F3CAFDBEDD	{"k19":"v19"}	[119,200]	{"c1":10,"c2":"c"}	2029-07-07
+119	2	5	1.4	5.7	fgh	2030-08-08 20:20:20.202020202	vwxyz	abcdede	68692CCAC0BDE7	{"k20":"v20"}	[120,200]	{"c1":10,"c2":"f"}	2030-08-08
+120	3	1	1.0	6.0	ijk	2031-09-09 21:21:21.212121212	wxyza	abcde	B4F3CAFDBEDD	{"k21":"v21"}	[121,200]	{"c1":10,"c2":"i"}	2031-09-09
+121	1	2	1.1	6.3	lmn	2032-10-10 22:22:22.222222222	bcdef	abcde		{"k22":"v22"}	[122,200]	{"c1":10,"c2":"l"}	2032-10-10
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_types
+SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
+unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types_staging
+PREHOOK: Output: default@parquet_types
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_types
+SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
+unhex(cbinary), m1, l1, st1, d FROM parquet_types_staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types_staging
+POSTHOOK: Output: default@parquet_types
+POSTHOOK: Lineage: parquet_types.cbinary EXPRESSION [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cbinary, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_types.cchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cchar, type:char(5), comment:null), ]
+POSTHOOK: Lineage: parquet_types.cdouble SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: parquet_types.cfloat SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: parquet_types.cint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_types.csmallint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: parquet_types.cstring1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: parquet_types.ctinyint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: parquet_types.cvarchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cvarchar, type:varchar(10), comment:null), ]
+POSTHOOK: Lineage: parquet_types.d SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:d, type:date, comment:null), ]
+POSTHOOK: Lineage: parquet_types.l1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:l1, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: parquet_types.m1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:m1, type:map<string,varchar(3)>, comment:null), ]
+POSTHOOK: Lineage: parquet_types.st1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:st1, type:struct<c1:int,c2:char(1)>, comment:null), ]
+POSTHOOK: Lineage: parquet_types.t SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:t, type:timestamp, comment:null), ]
+PREHOOK: query: SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
+hex(cbinary), m1, l1, st1, d FROM parquet_types
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cint, ctinyint, csmallint, cfloat, cdouble, cstring1, t, cchar, cvarchar,
+hex(cbinary), m1, l1, st1, d FROM parquet_types
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+100	1	1	1.0	0.0	abc	2011-01-01 01:01:01.111111111	a    	a  	B4F3CAFDBEDD	{"k1":"v1"}	[101,200]	{"c1":10,"c2":"a"}	2011-01-01
+101	2	2	1.1	0.3	def	2012-02-02 02:02:02.222222222	ab   	ab 	68692CCAC0BDE7	{"k2":"v2"}	[102,200]	{"c1":10,"c2":"d"}	2012-02-02
+102	3	3	1.2	0.6	ghi	2013-03-03 03:03:03.333333333	abc  	abc	B4F3CAFDBEDD	{"k3":"v3"}	[103,200]	{"c1":10,"c2":"g"}	2013-03-03
+103	1	4	1.3	0.9	jkl	2014-04-04 04:04:04.444444444	abcd 	abcd	68692CCAC0BDE7	{"k4":"v4"}	[104,200]	{"c1":10,"c2":"j"}	2014-04-04
+104	2	5	1.4	1.2	mno	2015-05-05 05:05:05.555555555	abcde	abcde	B4F3CAFDBEDD	{"k5":"v5"}	[105,200]	{"c1":10,"c2":"m"}	2015-05-05
+105	3	1	1.0	1.5	pqr	2016-06-06 06:06:06.666666666	abcde	abcdef	68692CCAC0BDE7	{"k6":"v6"}	[106,200]	{"c1":10,"c2":"p"}	2016-06-06
+106	1	2	1.1	1.8	stu	2017-07-07 07:07:07.777777777	abcde	abcdefg	B4F3CAFDBEDD	{"k7":"v7"}	[107,200]	{"c1":10,"c2":"s"}	2017-07-07
+107	2	3	1.2	2.1	vwx	2018-08-08 08:08:08.888888888	bcdef	abcdefgh	68692CCAC0BDE7	{"k8":"v8"}	[108,200]	{"c1":10,"c2":"v"}	2018-08-08
+108	3	4	1.3	2.4	yza	2019-09-09 09:09:09.999999999	cdefg	B4F3CAFDBE	68656C6C6F	{"k9":"v9"}	[109,200]	{"c1":10,"c2":"y"}	2019-09-09
+109	1	5	1.4	2.7	bcd	2020-10-10 10:10:10.101010101	klmno	abcdedef	68692CCAC0BDE7	{"k10":"v10"}	[110,200]	{"c1":10,"c2":"b"}	2020-10-10
+110	2	1	1.0	3.0	efg	2021-11-11 11:11:11.111111111	pqrst	abcdede	B4F3CAFDBEDD	{"k11":"v11"}	[111,200]	{"c1":10,"c2":"e"}	2021-11-11
+111	3	2	1.1	3.3	hij	2022-12-12 12:12:12.121212121	nopqr	abcded	68692CCAC0BDE7	{"k12":"v12"}	[112,200]	{"c1":10,"c2":"h"}	2022-12-12
+112	1	3	1.2	3.6	klm	2023-01-02 13:13:13.131313131	opqrs	abcdd	B4F3CAFDBEDD	{"k13":"v13"}	[113,200]	{"c1":10,"c2":"k"}	2023-01-02
+113	2	4	1.3	3.9	nop	2024-02-02 14:14:14.141414141	pqrst	abc	68692CCAC0BDE7	{"k14":"v14"}	[114,200]	{"c1":10,"c2":"n"}	2024-02-02
+114	3	5	1.4	4.2	qrs	2025-03-03 15:15:15.151515151	qrstu	b	B4F3CAFDBEDD	{"k15":"v15"}	[115,200]	{"c1":10,"c2":"q"}	2025-03-03
+115	1	1	1.0	4.5	qrs	2026-04-04 16:16:16.161616161	rstuv	abcded	68692CCAC0BDE7	{"k16":"v16"}	[116,200]	{"c1":10,"c2":"q"}	2026-04-04
+116	2	2	1.1	4.8	wxy	2027-05-05 17:17:17.171717171	stuvw	abcded	B4F3CAFDBEDD	{"k17":"v17"}	[117,200]	{"c1":10,"c2":"w"}	2027-05-05
+117	3	3	1.2	5.1	zab	2028-06-06 18:18:18.181818181	tuvwx	abcded	68692CCAC0BDE7	{"k18":"v18"}	[118,200]	{"c1":10,"c2":"z"}	2028-06-06
+118	1	4	1.3	5.4	cde	2029-07-07 19:19:19.191919191	uvwzy	abcdede	B4F3CAFDBEDD	{"k19":"v19"}	[119,200]	{"c1":10,"c2":"c"}	2029-07-07
+119	2	5	1.4	5.7	fgh	2030-08-08 20:20:20.202020202	vwxyz	abcdede	68692CCAC0BDE7	{"k20":"v20"}	[120,200]	{"c1":10,"c2":"f"}	2030-08-08
+120	3	1	1.0	6.0	ijk	2031-09-09 21:21:21.212121212	wxyza	abcde	B4F3CAFDBEDD	{"k21":"v21"}	[121,200]	{"c1":10,"c2":"i"}	2031-09-09
+121	1	2	1.1	6.3	lmn	2032-10-10 22:22:22.222222222	bcdef	abcde		{"k22":"v22"}	[122,200]	{"c1":10,"c2":"l"}	2032-10-10
+PREHOOK: query: SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+a    	1	a  	3
+ab   	2	ab 	3
+abc  	3	abc	3
+abcd 	4	abcd	4
+abcde	5	abcde	5
+abcde	5	abcdef	6
+abcde	5	abcdefg	7
+bcdef	5	abcdefgh	8
+cdefg	5	B4F3CAFDBE	10
+klmno	5	abcdedef	8
+pqrst	5	abcdede	7
+nopqr	5	abcded	6
+opqrs	5	abcdd	5
+pqrst	5	abc	3
+qrstu	5	b	1
+rstuv	5	abcded	6
+stuvw	5	abcded	6
+tuvwx	5	abcded	6
+uvwzy	5	abcdede	7
+vwxyz	5	abcdede	7
+wxyza	5	abcde	5
+bcdef	5	abcde	5
+PREHOOK: query: -- test types in group by
+
+SELECT ctinyint,
+  MAX(cint),
+  MIN(csmallint),
+  COUNT(cstring1),
+  ROUND(AVG(cfloat), 5),
+  ROUND(STDDEV_POP(cdouble),5)
+FROM parquet_types
+GROUP BY ctinyint
+ORDER BY ctinyint
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+POSTHOOK: query: -- test types in group by
+
+SELECT ctinyint,
+  MAX(cint),
+  MIN(csmallint),
+  COUNT(cstring1),
+  ROUND(AVG(cfloat), 5),
+  ROUND(STDDEV_POP(cdouble),5)
+FROM parquet_types
+GROUP BY ctinyint
+ORDER BY ctinyint
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+1	121	1	8	1.175	2.06216
+2	119	1	7	1.21429	1.8
+3	120	1	7	1.17143	1.8
+PREHOOK: query: SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cfloat, count(*) FROM parquet_types GROUP BY cfloat ORDER BY cfloat
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+1.0	5
+1.1	5
+1.2	4
+1.3	4
+1.4	4
+PREHOOK: query: SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cchar, count(*) FROM parquet_types GROUP BY cchar ORDER BY cchar
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+a    	1
+ab   	1
+abc  	1
+abcd 	1
+abcde	3
+bcdef	2
+cdefg	1
+klmno	1
+nopqr	1
+opqrs	1
+pqrst	2
+qrstu	1
+rstuv	1
+stuvw	1
+tuvwx	1
+uvwzy	1
+vwxyz	1
+wxyza	1
+PREHOOK: query: SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cvarchar, count(*) FROM parquet_types GROUP BY cvarchar ORDER BY cvarchar
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+B4F3CAFDBE	1
+a  	1
+ab 	1
+abc	2
+abcd	1
+abcdd	1
+abcde	3
+abcded	4
+abcdede	3
+abcdedef	1
+abcdef	1
+abcdefg	1
+abcdefgh	1
+b	1
+PREHOOK: query: SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cstring1, count(*) FROM parquet_types GROUP BY cstring1 ORDER BY cstring1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+abc	1
+bcd	1
+cde	1
+def	1
+efg	1
+fgh	1
+ghi	1
+hij	1
+ijk	1
+jkl	1
+klm	1
+lmn	1
+mno	1
+nop	1
+pqr	1
+qrs	2
+stu	1
+vwx	1
+wxy	1
+yza	1
+zab	1
+PREHOOK: query: SELECT t, count(*) FROM parquet_types GROUP BY t ORDER BY t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t, count(*) FROM parquet_types GROUP BY t ORDER BY t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+2011-01-01 01:01:01.111111111	1
+2012-02-02 02:02:02.222222222	1
+2013-03-03 03:03:03.333333333	1
+2014-04-04 04:04:04.444444444	1
+2015-05-05 05:05:05.555555555	1
+2016-06-06 06:06:06.666666666	1
+2017-07-07 07:07:07.777777777	1
+2018-08-08 08:08:08.888888888	1
+2019-09-09 09:09:09.999999999	1
+2020-10-10 10:10:10.101010101	1
+2021-11-11 11:11:11.111111111	1
+2022-12-12 12:12:12.121212121	1
+2023-01-02 13:13:13.131313131	1
+2024-02-02 14:14:14.141414141	1
+2025-03-03 15:15:15.151515151	1
+2026-04-04 16:16:16.161616161	1
+2027-05-05 17:17:17.171717171	1
+2028-06-06 18:18:18.181818181	1
+2029-07-07 19:19:19.191919191	1
+2030-08-08 20:20:20.202020202	1
+2031-09-09 21:21:21.212121212	1
+2032-10-10 22:22:22.222222222	1
+PREHOOK: query: SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT hex(cbinary), count(*) FROM parquet_types GROUP BY cbinary
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_types
+#### A masked pattern was here ####
+	1
+68656C6C6F	1
+68692CCAC0BDE7	10
+B4F3CAFDBEDD	10


[25/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/correlationoptimizer3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/correlationoptimizer3.q.out b/ql/src/test/results/clientpositive/llap/correlationoptimizer3.q.out
new file mode 100644
index 0000000..74b3d6c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/correlationoptimizer3.q.out
@@ -0,0 +1,1388 @@
+PREHOOK: query: -- When Correlation Optimizer is turned off, 5 MR jobs will be generated.
+-- When Correlation Optimizer is turned on, the subquery tmp will be evalauted
+-- in a single MR job (including the subquery b, the subquery d, and b join d).
+-- At the reduce side of the MR job evaluating tmp, two operation paths
+-- (for subquery b and d) have different depths. The path starting from subquery b
+-- is JOIN->GBY->JOIN, which has a depth of 3. While, the path starting from subquery d
+-- is JOIN->JOIN. We should be able to handle this case.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: -- When Correlation Optimizer is turned off, 5 MR jobs will be generated.
+-- When Correlation Optimizer is turned on, the subquery tmp will be evalauted
+-- in a single MR job (including the subquery b, the subquery d, and b join d).
+-- At the reduce side of the MR job evaluating tmp, two operation paths
+-- (for subquery b and d) have different depths. The path starting from subquery b
+-- is JOIN->GBY->JOIN, which has a depth of 3. While, the path starting from subquery d
+-- is JOIN->JOIN. We should be able to handle this case.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+        Reducer 7 <- Map 6 (SIMPLE_EDGE), Map 9 (SIMPLE_EDGE)
+        Reducer 8 <- Reducer 7 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 9 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1, _col2
+                Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: string), _col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col1 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col3
+                Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: hash(_col0) (type: int), hash(_col3) (type: int), hash(_col1) (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: sum(_col0), sum(_col1), sum(_col2)
+                    mode: hash
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint)
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 7 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1
+                Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: count(1)
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: bigint)
+        Reducer 8 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+1711763	107	3531902962
+PREHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+        Reducer 7 <- Map 6 (SIMPLE_EDGE), Map 9 (SIMPLE_EDGE)
+        Reducer 8 <- Reducer 7 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 9 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1, _col2
+                Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: string), _col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col1 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col3
+                Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: hash(_col0) (type: int), hash(_col3) (type: int), hash(_col1) (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: sum(_col0), sum(_col1), sum(_col2)
+                    mode: hash
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint)
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 7 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1
+                Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: count(1)
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: bigint)
+        Reducer 8 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+1711763	107	3531902962
+PREHOOK: query: -- Enable hive.auto.convert.join.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Enable hive.auto.convert.join.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Reducer 5 (BROADCAST_EDGE)
+        Map 4 <- Map 6 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 5 <- Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col0 (type: string)
+                        outputColumnNames: _col1, _col2
+                        input vertices:
+                          1 Map 3
+                        Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                        Select Operator
+                          expressions: _col1 (type: string), _col2 (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                          Map Join Operator
+                            condition map:
+                                 Inner Join 0 to 1
+                            keys:
+                              0 _col0 (type: string)
+                              1 _col0 (type: string)
+                            outputColumnNames: _col0, _col1, _col3
+                            input vertices:
+                              1 Reducer 5
+                            Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                            Select Operator
+                              expressions: hash(_col0) (type: int), hash(_col3) (type: int), hash(_col1) (type: int)
+                              outputColumnNames: _col0, _col1, _col2
+                              Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                              Group By Operator
+                                aggregations: sum(_col0), sum(_col1), sum(_col2)
+                                mode: hash
+                                outputColumnNames: _col0, _col1, _col2
+                                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                                Reduce Output Operator
+                                  sort order: 
+                                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                                  value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col0 (type: string)
+                        outputColumnNames: _col1
+                        input vertices:
+                          1 Map 6
+                        Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                        Select Operator
+                          expressions: _col1 (type: string)
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                          Group By Operator
+                            aggregations: count(1)
+                            keys: _col0 (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1
+                            Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string)
+                              sort order: +
+                              Map-reduce partition columns: _col0 (type: string)
+                              Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                              value expressions: _col1 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 5 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value
+      FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b
+      JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+1711763	107	3531902962
+PREHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+        Reducer 7 <- Map 6 (SIMPLE_EDGE), Map 9 (SIMPLE_EDGE)
+        Reducer 8 <- Reducer 7 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 9 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1, _col2
+                Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: string), _col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col1 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1, _col2, _col3
+                Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: hash(_col2) (type: int), hash(_col3) (type: int), hash(_col1) (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: sum(_col0), sum(_col1), sum(_col2)
+                    mode: hash
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint)
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 7 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1
+                Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: count(1)
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: bigint)
+        Reducer 8 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+1711763	107	3531902962
+PREHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+        Reducer 7 <- Map 6 (SIMPLE_EDGE), Map 9 (SIMPLE_EDGE)
+        Reducer 8 <- Reducer 7 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 9 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1, _col2
+                Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: string), _col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col1 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1, _col2, _col3
+                Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: hash(_col2) (type: int), hash(_col3) (type: int), hash(_col1) (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: sum(_col0), sum(_col1), sum(_col2)
+                    mode: hash
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint)
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 7 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1
+                Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: count(1)
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col1 (type: bigint)
+        Reducer 8 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+1711763	107	3531902962
+PREHOOK: query: -- Enable hive.auto.convert.join.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Enable hive.auto.convert.join.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Reducer 5 (BROADCAST_EDGE)
+        Map 4 <- Map 6 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 5 <- Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col0 (type: string)
+                        outputColumnNames: _col1, _col2
+                        input vertices:
+                          1 Map 3
+                        Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                        Select Operator
+                          expressions: _col1 (type: string), _col2 (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                          Map Join Operator
+                            condition map:
+                                 Inner Join 0 to 1
+                            keys:
+                              0 _col0 (type: string)
+                              1 _col0 (type: string)
+                            outputColumnNames: _col1, _col2, _col3
+                            input vertices:
+                              1 Reducer 5
+                            Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                            Select Operator
+                              expressions: hash(_col2) (type: int), hash(_col3) (type: int), hash(_col1) (type: int)
+                              outputColumnNames: _col0, _col1, _col2
+                              Statistics: Num rows: 60 Data size: 10980 Basic stats: COMPLETE Column stats: COMPLETE
+                              Group By Operator
+                                aggregations: sum(_col0), sum(_col1), sum(_col2)
+                                mode: hash
+                                outputColumnNames: _col0, _col1, _col2
+                                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                                Reduce Output Operator
+                                  sort order: 
+                                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                                  value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col0 (type: string)
+                        outputColumnNames: _col1
+                        input vertices:
+                          1 Map 6
+                        Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                        Select Operator
+                          expressions: _col1 (type: string)
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 60 Data size: 5160 Basic stats: COMPLETE Column stats: COMPLETE
+                          Group By Operator
+                            aggregations: count(1)
+                            keys: _col0 (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1
+                            Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string)
+                              sort order: +
+                              Map-reduce partition columns: _col0 (type: string)
+                              Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                              value expressions: _col1 (type: bigint)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 2150 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 5 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 14 Data size: 1316 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col1 (type: bigint)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value))
+FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value
+      FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b
+      JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d
+      ON b.key = d.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+1711763	107	3531902962


[28/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out
new file mode 100644
index 0000000..13df214
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out
@@ -0,0 +1,90 @@
+PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
+select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
+select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+NULL	NULL	NULL
+PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+NULL	NULL	1
+PREHOOK: query: select key from(select key from (select key from cbo_t1 limit 5)cbo_t2  limit 5)cbo_t3  limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key from(select key from (select key from cbo_t1 limit 5)cbo_t2  limit 5)cbo_t3  limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1
+1
+1
+1
+1
+PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1  order by c_int limit 5)cbo_t2  order by c_int limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1  order by c_int limit 5)cbo_t2  order by c_int limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+NULL	NULL
+NULL	NULL
+1	1
+1	1
+1	1
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1	12	6
+1	2	6
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key  having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c  having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0  order by cbo_t3.c_int % c asc, cbo_t3.c_int, c desc limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key  having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c  having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0  order by cbo_t3.c_int % c asc, cbo_t3.c_int, c desc limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1	12	6
+1	2	6

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/cbo_rp_subq_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_subq_in.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_subq_in.q.out
new file mode 100644
index 0000000..f6bfad2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_rp_subq_in.q.out
@@ -0,0 +1,151 @@
+PREHOOK: query: -- 17. SubQueries In
+-- non agg, non corr
+select * 
+from src_cbo 
+where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- 17. SubQueries In
+-- non agg, non corr
+select * 
+from src_cbo 
+where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+90	val_90
+90	val_90
+90	val_90
+92	val_92
+95	val_95
+95	val_95
+96	val_96
+97	val_97
+97	val_97
+98	val_98
+98	val_98
+PREHOOK: query: -- agg, corr
+-- add back once rank issue fixed for cbo
+
+-- distinct, corr
+select * 
+from src_cbo b 
+where b.key in
+        (select distinct a.key 
+         from src_cbo a 
+         where b.value = a.value and a.key > '9'
+        ) order by b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- agg, corr
+-- add back once rank issue fixed for cbo
+
+-- distinct, corr
+select * 
+from src_cbo b 
+where b.key in
+        (select distinct a.key 
+         from src_cbo a 
+         where b.value = a.value and a.key > '9'
+        ) order by b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+90	val_90
+90	val_90
+90	val_90
+92	val_92
+95	val_95
+95	val_95
+96	val_96
+97	val_97
+97	val_97
+98	val_98
+98	val_98
+PREHOOK: query: -- non agg, corr, with join in Parent Query
+select p.p_partkey, li.l_suppkey 
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
+where li.l_linenumber = 1 and
+ li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
+ order by p.p_partkey
+PREHOOK: type: QUERY
+PREHOOK: Input: default@lineitem
+#### A masked pattern was here ####
+POSTHOOK: query: -- non agg, corr, with join in Parent Query
+select p.p_partkey, li.l_suppkey 
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey 
+where li.l_linenumber = 1 and
+ li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
+ order by p.p_partkey
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@lineitem
+#### A masked pattern was here ####
+4297	1798
+108570	8571
+PREHOOK: query: -- where and having
+-- Plan is:
+-- Stage 1: b semijoin sq1:src_cbo (subquery in where)
+-- Stage 2: group by Stage 1 o/p
+-- Stage 5: group by on sq2:src_cbo (subquery in having)
+-- Stage 6: Stage 2 o/p semijoin Stage 5
+select key, value, count(*) 
+from src_cbo b
+where b.key in (select key from src_cbo where src_cbo.key > '8')
+group by key, value
+having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- where and having
+-- Plan is:
+-- Stage 1: b semijoin sq1:src_cbo (subquery in where)
+-- Stage 2: group by Stage 1 o/p
+-- Stage 5: group by on sq2:src_cbo (subquery in having)
+-- Stage 6: Stage 2 o/p semijoin Stage 5
+select key, value, count(*) 
+from src_cbo b
+where b.key in (select key from src_cbo where src_cbo.key > '8')
+group by key, value
+having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+80	val_80	1
+82	val_82	1
+83	val_83	2
+84	val_84	2
+85	val_85	1
+86	val_86	1
+87	val_87	1
+9	val_9	1
+90	val_90	3
+92	val_92	1
+95	val_95	2
+96	val_96	1
+97	val_97	2
+98	val_98	2
+PREHOOK: query: -- non agg, non corr, windowing
+select p_mfgr, p_name, avg(p_size) 
+from part 
+group by p_mfgr, p_name
+having p_name in 
+  (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) order by p_mfgr
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- non agg, non corr, windowing
+select p_mfgr, p_name, avg(p_size) 
+from part 
+group by p_mfgr, p_name
+having p_name in 
+  (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) order by p_mfgr
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	almond antique burnished rose metallic	2.0
+Manufacturer#2	almond aquamarine midnight light salmon	2.0
+Manufacturer#3	almond antique misty red olive	1.0
+Manufacturer#4	almond aquamarine yellow dodger mint	7.0
+Manufacturer#5	almond antique sky peru orange	2.0

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf.q.out
new file mode 100644
index 0000000..b30d9da
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf.q.out
@@ -0,0 +1,123 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 8. Test UDF/UDAF
+select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 8. Test UDF/UDAF
+select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+20	18	18	1.0	1	1
+PREHOOK: query: select count(*), count(c_int) as a, sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0  then 1 when 1 then 2 else 3 end, sum(case c_int when 0  then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*), count(c_int) as a, sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0  then 1 when 1 then 2 else 3 end, sum(case c_int when 0  then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18	18	18	1.0	1	1	2	36
+2	0	NULL	NULL	NULL	NULL	3	6
+PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+20	1	18	1.0	1	1
+PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0  then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0  then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0  then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0  then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18	1	18	1.0	1	1	2	36
+2	0	NULL	NULL	NULL	NULL	3	6
+PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+#### A masked pattern was here ####
+POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+#### A masked pattern was here ####
+1	20	1	18
+PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1	20	1	1
+PREHOOK: query: select key,count(c_int) as a, avg(c_float) from cbo_t1 group by key order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key,count(c_int) as a, avg(c_float) from cbo_t1 group by key order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+ 1	2	1.0
+ 1 	2	1.0
+1	12	1.0
+1 	2	1.0
+NULL	0	NULL
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0	NULL
+1	1.0
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0	NULL
+1	1.0
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0	NULL
+1	1.0

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf_stats_opt.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf_stats_opt.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf_stats_opt.q.out
new file mode 100644
index 0000000..3a589b4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf_stats_opt.q.out
@@ -0,0 +1,124 @@
+Warning: Value had a \n character in it.
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 8. Test UDF/UDAF
+select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 8. Test UDF/UDAF
+select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+20	18	18	1.0	1	1
+PREHOOK: query: select count(*), count(c_int) as a, sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0  then 1 when 1 then 2 else 3 end, sum(case c_int when 0  then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*), count(c_int) as a, sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0  then 1 when 1 then 2 else 3 end, sum(case c_int when 0  then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18	18	18	1.0	1	1	2	36
+2	0	NULL	NULL	NULL	NULL	3	6
+PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+20	1	18	1.0	1	1
+PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0  then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0  then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0  then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0  then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18	1	18	1.0	1	1	2	36
+2	0	NULL	NULL	NULL	NULL	3	6
+PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+#### A masked pattern was here ####
+POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+#### A masked pattern was here ####
+1	20	1	18
+PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1	20	1	1
+PREHOOK: query: select key,count(c_int) as a, avg(c_float) from cbo_t1 group by key order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key,count(c_int) as a, avg(c_float) from cbo_t1 group by key order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+ 1	2	1.0
+ 1 	2	1.0
+1	12	1.0
+1 	2	1.0
+NULL	0	NULL
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0	NULL
+1	1.0
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0	NULL
+1	1.0
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0	NULL
+1	1.0

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out
new file mode 100644
index 0000000..4a7b935
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out
@@ -0,0 +1,237 @@
+PREHOOK: query: -- 10. Test views
+create view v1 as select c_int, value, c_boolean, dt from cbo_t1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: -- 10. Test views
+create view v1 as select c_int, value, c_boolean, dt from cbo_t1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select c_int, value from cbo_t2
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select c_int, value from cbo_t2
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: select value from v1 where c_boolean=false
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select value from v1 where c_boolean=false
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+1
+1
+PREHOOK: query: select max(c_int) from v1 group by (c_boolean)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select max(c_int) from v1 group by (c_boolean)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+NULL
+1
+1
+PREHOOK: query: select count(v1.c_int)  from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(v1.c_int)  from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+234
+PREHOOK: query: select count(v1.c_int)  from v1 join v2 on v1.c_int = v2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(v1.c_int)  from v1 join v2 on v1.c_int = v2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v2
+#### A masked pattern was here ####
+234
+PREHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+156
+PREHOOK: query: create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v3
+POSTHOOK: query: create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v3
+PREHOOK: query: select count(val) from v3 where val != '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v3
+#### A masked pattern was here ####
+POSTHOOK: query: select count(val) from v3 where val != '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v3
+#### A masked pattern was here ####
+96
+PREHOOK: query: with q1 as ( select key from cbo_t1 where key = '1')
+select count(*) from q1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select key from cbo_t1 where key = '1')
+select count(*) from q1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+12
+PREHOOK: query: with q1 as ( select value from v1 where c_boolean = false)
+select count(value) from q1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select value from v1 where c_boolean = false)
+select count(value) from q1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+2
+PREHOOK: query: create view v4 as
+with q1 as ( select key,c_int from cbo_t1  where key = '1')
+select * from q1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v4
+POSTHOOK: query: create view v4 as
+with q1 as ( select key,c_int from cbo_t1  where key = '1')
+select * from q1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v4
+PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false),
+q2 as ( select c_int,c_boolean from v1  where value = '1')
+select sum(c_int) from (select c_int from q1) a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false),
+q2 as ( select c_int,c_boolean from v1  where value = '1')
+select sum(c_int) from (select c_int from q1) a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+2
+PREHOOK: query: with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int  and cbo_t1.dt='2014'),
+q2 as ( select c_int,c_boolean from v1  where value = '1' or dt = '14')
+select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v4
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int  and cbo_t1.dt='2014'),
+q2 as ( select c_int,c_boolean from v1  where value = '1' or dt = '14')
+select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v4
+#### A masked pattern was here ####
+31104
+PREHOOK: query: drop view v1
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v1
+PREHOOK: Output: default@v1
+POSTHOOK: query: drop view v1
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: default@v1
+PREHOOK: query: drop view v2
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v2
+PREHOOK: Output: default@v2
+POSTHOOK: query: drop view v2
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v2
+POSTHOOK: Output: default@v2
+PREHOOK: query: drop view v3
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v3
+PREHOOK: Output: default@v3
+POSTHOOK: query: drop view v3
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v3
+POSTHOOK: Output: default@v3
+PREHOOK: query: drop view v4
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v4
+PREHOOK: Output: default@v4
+POSTHOOK: query: drop view v4
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v4
+POSTHOOK: Output: default@v4

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/cluster.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cluster.q.out b/ql/src/test/results/clientpositive/llap/cluster.q.out
new file mode 100644
index 0000000..8c89ee3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cluster.q.out
@@ -0,0 +1,1563 @@
+PREHOOK: query: EXPLAIN
+SELECT * FROM SRC x where x.key = 10 CLUSTER BY x.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM SRC x where x.key = 10 CLUSTER BY x.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 10) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT * FROM SRC x where x.key = 10 CLUSTER BY x.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SRC x where x.key = 10 CLUSTER BY x.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+10	val_10
+PREHOOK: query: EXPLAIN
+SELECT * FROM SRC x  where x.key = 20 CLUSTER BY key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM SRC x  where x.key = 20 CLUSTER BY key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20
+PREHOOK: query: EXPLAIN
+SELECT x.* FROM SRC x where x.key = 20 CLUSTER BY key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT x.* FROM SRC x where x.key = 20 CLUSTER BY key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT x.* FROM SRC x where x.key = 20 CLUSTER BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.* FROM SRC x where x.key = 20 CLUSTER BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20
+PREHOOK: query: EXPLAIN
+SELECT x.*  FROM SRC x where x.key = 20 CLUSTER BY x.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT x.*  FROM SRC x where x.key = 20 CLUSTER BY x.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT x.*  FROM SRC x where x.key = 20 CLUSTER BY x.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.*  FROM SRC x where x.key = 20 CLUSTER BY x.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20
+PREHOOK: query: EXPLAIN
+SELECT x.key, x.value as v1 FROM SRC x where x.key = 20 CLUSTER BY key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT x.key, x.value as v1 FROM SRC x where x.key = 20 CLUSTER BY key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT x.key, x.value as v1 FROM SRC x where x.key = 20 CLUSTER BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.key, x.value as v1 FROM SRC x where x.key = 20 CLUSTER BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20
+PREHOOK: query: EXPLAIN
+SELECT x.key, x.value as v1 FROM SRC x where x.key = 20 CLUSTER BY x.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT x.key, x.value as v1 FROM SRC x where x.key = 20 CLUSTER BY x.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT x.key, x.value as v1 FROM SRC x where x.key = 20 CLUSTER BY x.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.key, x.value as v1 FROM SRC x where x.key = 20 CLUSTER BY x.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20
+PREHOOK: query: EXPLAIN
+SELECT x.key, x.value as v1  FROM SRC x where x.key = 20 CLUSTER BY v1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT x.key, x.value as v1  FROM SRC x where x.key = 20 CLUSTER BY v1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: string)
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT x.key, x.value as v1  FROM SRC x where x.key = 20 CLUSTER BY v1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.key, x.value as v1  FROM SRC x where x.key = 20 CLUSTER BY v1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20
+PREHOOK: query: EXPLAIN
+SELECT y.* from (SELECT x.* FROM SRC x CLUSTER BY x.key) y where y.key = 20
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT y.* from (SELECT x.* FROM SRC x CLUSTER BY x.key) y where y.key = 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT y.* from (SELECT x.* FROM SRC x CLUSTER BY x.key) y where y.key = 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT y.* from (SELECT x.* FROM SRC x CLUSTER BY x.key) y where y.key = 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20
+PREHOOK: query: EXPLAIN 
+SELECT x.key, x.value as v1, y.key  FROM SRC x JOIN SRC y ON (x.key = y.key)  where x.key = 20 CLUSTER BY v1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN 
+SELECT x.key, x.value as v1, y.key  FROM SRC x JOIN SRC y ON (x.key = y.key)  where x.key = 20 CLUSTER BY v1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: string)
+                  1 key (type: string)
+                outputColumnNames: _col0, _col1, _col5
+                Statistics: Num rows: 4 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 4 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col1 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col1 (type: string)
+                    Statistics: Num rows: 4 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col0 (type: string), _col2 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col1 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 4 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT x.key, x.value as v1, y.key  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY v1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.key, x.value as v1, y.key  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY v1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20	20
+PREHOOK: query: EXPLAIN 
+SELECT x.key, x.value as v1, y.*  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY v1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN 
+SELECT x.key, x.value as v1, y.*  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY v1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: string)
+                  1 key (type: string)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col1 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col1 (type: string)
+                    Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col0 (type: string), _col2 (type: string), _col3 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT x.key, x.value as v1, y.*  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY v1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.key, x.value as v1, y.*  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY v1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20	20	val_20
+PREHOOK: query: EXPLAIN
+SELECT x.key, x.value as v1, y.*  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY x.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT x.key, x.value as v1, y.*  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY x.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: string)
+                  1 key (type: string)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT x.key, x.value as v1, y.*  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY x.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.key, x.value as v1, y.*  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY x.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20	20	val_20
+PREHOOK: query: EXPLAIN
+SELECT x.key, x.value as v1, y.key as yk  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT x.key, x.value as v1, y.key as yk  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key = 20) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: key (type: string)
+                      sort order: +
+                      Map-reduce partition columns: key (type: string)
+                      Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: string)
+                  1 key (type: string)
+                outputColumnNames: _col0, _col1, _col5
+                Statistics: Num rows: 4 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 4 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 4 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col1 (type: string), _col2 (type: string)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), VALUE._col1 (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 4 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 1060 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT x.key, x.value as v1, y.key as yk  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.key, x.value as v1, y.key as yk  FROM SRC x JOIN SRC y ON (x.key = y.key) where x.key = 20 CLUSTER BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+20	val_20	20
+PREHOOK: query: EXPLAIN
+SELECT unioninput.*
+FROM (
+  FROM src select src.key, src.value WHERE src.key < 100
+  UNION ALL
+  FROM src SELECT src.* WHERE src.key > 100
+) unioninput
+CLUSTER BY unioninput.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT unioninput.*
+FROM (
+  FROM src select src.key, src.value WHERE src.key < 100
+  UNION ALL
+  FROM src SELECT src.* WHERE src.key > 100
+) unioninput
+CLUSTER BY unioninput.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Map 4 <- Union 2 (CONTAINS)
+        Reducer 3 <- Union 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key < 100) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 332 Data size: 59096 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key > 100) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 332 Data size: 59096 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 332 Data size: 59096 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 332 Data size: 59096 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT unioninput.*
+FROM (
+  FROM src select src.key, src.value WHERE src.key < 100
+  UNION ALL
+  FROM src SELECT src.* WHERE src.key > 100
+) unioninput
+CLUSTER BY unioninput.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT unioninput.*
+FROM (
+  FROM src select src.key, src.value WHERE src.key < 100
+  UNION ALL
+  FROM src SELECT src.* WHERE src.key > 100
+) unioninput
+CLUSTER BY unioninput.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+103	val_103
+103	val_103
+104	val_104
+104	val_104
+105	val_105
+11	val_11
+111	val_111
+113	val_113
+113	val_113
+114	val_114
+116	val_116
+118	val_118
+118	val_118
+119	val_119
+119	val_119
+119	val_119
+12	val_12
+12	val_12
+120	val_120
+120	val_120
+125	val_125
+125	val_125
+126	val_126
+128	val_128
+128	val_128
+128	val_128
+129	val_129
+129	val_129
+131	val_131
+133	val_133
+134	val_134
+134	val_134
+136	val_136
+137	val_137
+137	val_137
+138	val_138
+138	val_138
+138	val_138
+138	val_138
+143	val_143
+145	val_145
+146	val_146
+146	val_146
+149	val_149
+149	val_149
+15	val_15
+15	val_15
+150	val_150
+152	val_152
+152	val_152
+153	val_153
+155	val_155
+156	val_156
+157	val_157
+158	val_158
+160	val_160
+162	val_162
+163	val_163
+164	val_164
+164	val_164
+165	val_165
+165	val_165
+166	val_166
+167	val_167
+167	val_167
+167	val_167
+168	val_168
+169	val_169
+169	val_169
+169	val_169
+169	val_169
+17	val_17
+170	val_170
+172	val_172
+172	val_172
+174	val_174
+174	val_174
+175	val_175
+175	val_175
+176	val_176
+176	val_176
+177	val_177
+178	val_178
+179	val_179
+179	val_179
+18	val_18
+18	val_18
+180	val_180
+181	val_181
+183	val_183
+186	val_186
+187	val_187
+187	val_187
+187	val_187
+189	val_189
+19	val_19
+190	val_190
+191	val_191
+191	val_191
+192	val_192
+193	val_193
+193	val_193
+193	val_193
+194	val_194
+195	val_195
+195	val_195
+196	val_196
+197	val_197
+197	val_197
+199	val_199
+199	val_199
+199	val_199
+2	val_2
+20	val_20
+200	val_200
+200	val_200
+201	val_201
+202	val_202
+203	val_203
+203	val_203
+205	val_205
+205	val_205
+207	val_207
+207	val_207
+208	val_208
+208	val_208
+208	val_208
+209	val_209
+209	val_209
+213	val_213
+213	val_213
+214	val_214
+216	val_216
+216	val_216
+217	val_217
+217	val_217
+218	val_218
+219	val_219
+219	val_219
+221	val_221
+221	val_221
+222	val_222
+223	val_223
+223	val_223
+224	val_224
+224	val_224
+226	val_226
+228	val_228
+229	val_229
+229	val_229
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+233	val_233
+233	val_233
+235	val_235
+237	val_237
+237	val_237
+238	val_238
+238	val_238
+239	val_239
+239	val_239
+24	val_24
+24	val_24
+241	val_241
+242	val_242
+242	val_242
+244	val_244
+247	val_247
+248	val_248
+249	val_249
+252	val_252
+255	val_255
+255	val_255
+256	val_256
+256	val_256
+257	val_257
+258	val_258
+26	val_26
+26	val_26
+260	val_260
+262	val_262
+263	val_263
+265	val_265
+265	val_265
+266	val_266
+27	val_27
+272	val_272
+272	val_272
+273	val_273
+273	val_273
+273	val_273
+274	val_274
+275	val_275
+277	val_277
+277	val_277
+277	val_277
+277	val_277
+278	val_278
+278	val_278
+28	val_28
+280	val_280
+280	val_280
+281	val_281
+281	val_281
+282	val_282
+282	val_282
+283	val_283
+284	val_284
+285	val_285
+286	val_286
+287	val_287
+288	val_288
+288	val_288
+289	val_289
+291	val_291
+292	val_292
+296	val_296
+298	val_298
+298	val_298
+298	val_298
+30	val_30
+302	val_302
+305	val_305
+306	val_306
+307	val_307
+307	val_307
+308	val_308
+309	val_309
+309	val_309
+310	val_310
+311	val_311
+311	val_311
+311	val_311
+315	val_315
+316	val_316
+316	val_316
+316	val_316
+317	val_317
+317	val_317
+318	val_318
+318	val_318
+318	val_318
+321	val_321
+321	val_321
+322	val_322
+322	val_322
+323	val_323
+325	val_325
+325	val_325
+327	val_327
+327	val_327
+327	val_327
+33	val_33
+331	val_331
+331	val_331
+332	val_332
+333	val_333
+333	val_333
+335	val_335
+336	val_336
+338	val_338
+339	val_339
+34	val_34
+341	val_341
+342	val_342
+342	val_342
+344	val_344
+344	val_344
+345	val_345
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+35	val_35
+35	val_35
+35	val_35
+351	val_351
+353	val_353
+353	val_353
+356	val_356
+360	val_360
+362	val_362
+364	val_364
+365	val_365
+366	val_366
+367	val_367
+367	val_367
+368	val_368
+369	val_369
+369	val_369
+369	val_369
+37	val_37
+37	val_37
+373	val_373
+374	val_374
+375	val_375
+377	val_377
+378	val_378
+379	val_379
+382	val_382
+382	val_382
+384	val_384
+384	val_384
+384	val_384
+386	val_386
+389	val_389
+392	val_392
+393	val_393
+394	val_394
+395	val_395
+395	val_395
+396	val_396
+396	val_396
+396	val_396
+397	val_397
+397	val_397
+399	val_399
+399	val_399
+4	val_4
+400	val_400
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+401	val_401
+402	val_402
+403	val_403
+403	val_403
+403	val_403
+404	val_404
+404	val_404
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+407	val_407
+409	val_409
+409	val_409
+409	val_409
+41	val_41
+411	val_411
+413	val_413
+413	val_413
+414	val_414
+414	val_414
+417	val_417
+417	val_417
+417	val_417
+418	val_418
+419	val_419
+42	val_42
+42	val_42
+421	val_421
+424	val_424
+424	val_424
+427	val_427
+429	val_429
+429	val_429
+43	val_43
+430	val_430
+430	val_430
+430	val_430
+431	val_431
+431	val_431
+431	val_431
+432	val_432
+435	val_435
+436	val_436
+437	val_437
+438	val_438
+438	val_438
+438	val_438
+439	val_439
+439	val_439
+44	val_44
+443	val_443
+444	val_444
+446	val_446
+448	val_448
+449	val_449
+452	val_452
+453	val_453
+454	val_454
+454	val_454
+454	val_454
+455	val_455
+457	val_457
+458	val_458
+458	val_458
+459	val_459
+459	val_459
+460	val_460
+462	val_462
+462	val_462
+463	val_463
+463	val_463
+466	val_466
+466	val_466
+466	val_466
+467	val_467
+468	val_468
+468	val_468
+468	val_468
+468	val_468
+469	val_469
+469	val_469
+469	val_469
+469	val_469
+469	val_469
+47	val_47
+470	val_470
+472	val_472
+475	val_475
+477	val_477
+478	val_478
+478	val_478
+479	val_479
+480	val_480
+480	val_480
+480	val_480
+481	val_481
+482	val_482
+483	val_483
+484	val_484
+485	val_485
+487	val_487
+489	val_489
+489	val_489
+489	val_489
+489	val_489
+490	val_490
+491	val_491
+492	val_492
+492	val_492
+493	val_493
+494	val_494
+495	val_495
+496	val_496
+497	val_497
+498	val_498
+498	val_498
+498	val_498
+5	val_5
+5	val_5
+5	val_5
+51	val_51
+51	val_51
+53	val_53
+54	val_54
+57	val_57
+58	val_58
+58	val_58
+64	val_64
+65	val_65
+66	val_66
+67	val_67
+67	val_67
+69	val_69
+70	val_70
+70	val_70
+70	val_70
+72	val_72
+72	val_72
+74	val_74
+76	val_76
+76	val_76
+77	val_77
+78	val_78
+8	val_8
+80	val_80
+82	val_82
+83	val_83
+83	val_83
+84	val_84
+84	val_84
+85	val_85
+86	val_86
+87	val_87
+9	val_9
+90	val_90
+90	val_90
+90	val_90
+92	val_92
+95	val_95
+95	val_95
+96	val_96
+97	val_97
+97	val_97
+98	val_98
+98	val_98


[35/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f562dfb5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f562dfb5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f562dfb5

Branch: refs/heads/master
Commit: f562dfb5207e8246e5f12696e0d7f373c3e3bf4c
Parents: 399af46
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Tue Oct 18 18:39:27 2016 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Tue Oct 18 18:39:27 2016 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   84 +
 .../alter_table_invalidate_column_stats.q.out   |  932 ++++
 .../clientpositive/llap/authorization_2.q.out   |  667 +++
 .../llap/auto_smb_mapjoin_14.q.out              | 1973 +++++++
 .../clientpositive/llap/bucketmapjoin1.q.out    | 1068 ++++
 .../clientpositive/llap/bucketmapjoin2.q.out    | 1234 +++++
 .../clientpositive/llap/bucketmapjoin3.q.out    |  830 +++
 .../clientpositive/llap/bucketmapjoin4.q.out    |  798 +++
 .../llap/bucketsortoptimize_insert_6.q.out      | 1239 +++++
 .../llap/bucketsortoptimize_insert_7.q.out      |  621 +++
 .../clientpositive/llap/cbo_rp_limit.q.out      |   90 +
 .../clientpositive/llap/cbo_rp_subq_in.q.out    |  151 +
 .../clientpositive/llap/cbo_rp_udf_udaf.q.out   |  123 +
 .../llap/cbo_rp_udf_udaf_stats_opt.q.out        |  124 +
 .../clientpositive/llap/cbo_rp_views.q.out      |  237 +
 .../results/clientpositive/llap/cluster.q.out   | 1563 ++++++
 .../columnStatsUpdateForStatsOptimizer_1.q.out  | 1029 ++++
 .../llap/column_access_stats.q.out              |  938 ++++
 .../llap/columnstats_part_coltype.q.out         |  441 ++
 .../llap/correlationoptimizer3.q.out            | 1388 +++++
 .../results/clientpositive/llap/database.q.out  | 1484 +++++
 .../llap/drop_partition_with_stats.q.out        |  496 ++
 .../extrapolate_part_stats_partial_ndv.q.out    | 1301 +++++
 .../llap/groupby_grouping_id2.q.out             |  234 +
 .../llap/groupby_resolution.q.out               |  838 +++
 .../clientpositive/llap/join32_lessSize.q.out   | 2501 +++++++++
 .../clientpositive/llap/lateral_view.q.out      |  739 +++
 .../clientpositive/llap/limit_pushdown3.q.out   | 1446 +++++
 .../clientpositive/llap/multi_column_in.q.out   |  365 ++
 .../llap/multi_column_in_single.q.out           |  327 ++
 .../clientpositive/llap/multi_insert.q.out      | 2409 ++++++++
 .../llap/multi_insert_lateral_view.q.out        | 1507 ++++++
 .../clientpositive/llap/offset_limit.q.out      |  277 +
 .../llap/offset_limit_ppd_optimizer.q.out       | 1476 +++++
 .../clientpositive/llap/orc_create.q.out        |  782 +++
 .../clientpositive/llap/orc_ppd_varchar.q.out   |  220 +
 .../llap/orc_predicate_pushdown.q.out           | 1184 ++++
 .../llap/orc_split_elimination.q.out            |  532 ++
 .../llap/parquet_predicate_pushdown.q.out       | 1277 +++++
 .../clientpositive/llap/parquet_types.q.out     |  379 ++
 .../test/results/clientpositive/llap/pcs.q.out  | 1738 ++++++
 .../clientpositive/llap/ppd_union_view.q.out    |  686 +++
 .../clientpositive/llap/ppr_pushdown.q.out      |  328 ++
 .../llap/reduce_deduplicate_extended.q.out      | 5122 ++++++++++++++++++
 .../clientpositive/llap/skewjoinopt15.q.out     |  500 ++
 .../clientpositive/llap/skiphf_aggr.q.out       |  267 +
 .../clientpositive/llap/smb_mapjoin_14.q.out    | 1709 ++++++
 .../clientpositive/llap/smb_mapjoin_15.q.out    | 1140 ++++
 .../clientpositive/llap/smb_mapjoin_17.q.out    | 1069 ++++
 .../clientpositive/llap/smb_mapjoin_18.q.out    |  480 ++
 .../clientpositive/llap/smb_mapjoin_19.q.out    |  259 +
 .../clientpositive/llap/smb_mapjoin_4.q.out     | 1433 +++++
 .../clientpositive/llap/smb_mapjoin_5.q.out     | 1433 +++++
 .../clientpositive/llap/smb_mapjoin_6.q.out     | 2755 ++++++++++
 .../results/clientpositive/llap/stats11.q.out   |  996 ++++
 .../clientpositive/llap/subquery_views.q.out    |  571 ++
 .../clientpositive/llap/varchar_udf1.q.out      |  453 ++
 .../llap/vector_adaptor_usage_mode.q.out        | 1000 ++++
 58 files changed, 57243 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 8aee7f5..73cc8a9 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -434,6 +434,7 @@ minillap.query.files=acid_bucket_pruning.q,\
   orc_ppd_schema_evol_3a.q,\
   global_limit.q,\
   dynamic_partition_pruning_2.q,\
+  union_fast_stats.q,\
   tez_union_dynamic_partition.q,\
   load_fs2.q
 
@@ -598,6 +599,89 @@ minillaplocal.query.files=acid_vectorization_missing_cols.q,\
   vectorized_ptf.q,\
   windowing.q,\
   windowing_gby.q,\
+  unionDistinct_2.q,\
+  auto_smb_mapjoin_14.q,\
+  subquery_views.q,\
+  vector_nullsafe_join.q,\
+  smb_mapjoin_18.q,\
+  varchar_udf1.q,\
+  vectorized_parquet.q,\
+  bucketmapjoin2.q,\
+  orc_ppd_varchar.q,\
+  multi_insert.q,\
+  cbo_rp_limit.q,\
+  vector_interval_2.q,\
+  cbo_semijoin.q,\
+  parquet_predicate_pushdown.q,\
+  vector_outer_join5.q,\
+  smb_mapjoin_6.q,\
+  multi_column_in.q,\
+  orc_predicate_pushdown.q,\
+  columnStatsUpdateForStatsOptimizer_1.q,\
+  reduce_deduplicate_extended.q,\
+  limit_pushdown3.q,\
+  offset_limit.q,\
+  vector_join_nulls.q,\
+  correlationoptimizer3.q,\
+  vectorization_0.q,\
+  columnstats_part_coltype.q,\
+  drop_partition_with_stats.q,\
+  dynpart_sort_optimization2.q,\
+  multi_column_in_single.q,\
+  join32_lessSize.q,\
+  alter_table_invalidate_column_stats.q,\
+  bucketmapjoin1.q,\
+  ppr_pushdown.q,\
+  smb_mapjoin_14.q,\
+  vector_between_in.q,\
+  offset_limit_ppd_optimizer.q,\
+  cluster.q,\
+  subquery_in.q,\
+  stats11.q,\
+  orc_create.q,\
+  orc_split_elimination.q,\
+  order_null.q,\
+  cbo_rp_subq_in.q,\
+  skewjoinopt15.q,\
+  authorization_2.q,\
+  cbo_subq_in.q,\
+  alter_merge_orc.q,\
+  bucketsortoptimize_insert_6.q,\
+  bucketmapjoin4.q,\
+  orc_merge7.q,\
+  column_access_stats.q,\
+  smb_mapjoin_5.q,\
+  vector_adaptor_usage_mode.q,\
+  optimize_nullscan.q,\
+  parquet_types.q,\
+  groupby_grouping_id2.q,\
+  constprog_semijoin.q,\
+  ppd_union_view.q,\
+  smb_mapjoin_19.q,\
+  cbo_rp_views.q,\
+  bucketsortoptimize_insert_7.q,\
+  smb_mapjoin_15.q,\
+  vectorized_nested_mapjoin.q,\
+  skiphf_aggr.q,\
+  multi_insert_lateral_view.q,\
+  smb_mapjoin_4.q,\
+  cbo_udf_udaf.q,\
+  bucketmapjoin3.q,\
+  metadataonly1.q,\
+  lateral_view.q,\
+  extrapolate_part_stats_partial_ndv.q,\
+  cbo_views.q,\
+  limit_pushdown.q,\
+  cbo_rp_udf_udaf.q,\
+  count.q,\
+  vector_inner_join.q,\
+  temp_table.q,\
+  vector_partition_diff_num_cols.q,\
+  vector_count_distinct.q,\
+  cbo_rp_udf_udaf_stats_opt.q,\
+  database.q,\
+  smb_mapjoin_17.q,\
+  groupby_resolution.q,\
   windowing_windowspec2.q
 
 encrypted.query.files=encryption_join_unencrypted_tbl.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/alter_table_invalidate_column_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/alter_table_invalidate_column_stats.q.out b/ql/src/test/results/clientpositive/llap/alter_table_invalidate_column_stats.q.out
new file mode 100644
index 0000000..85d7dc4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/alter_table_invalidate_column_stats.q.out
@@ -0,0 +1,932 @@
+PREHOOK: query: drop database if exists statsdb1
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: drop database if exists statsdb1
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: create database statsdb1
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:statsdb1
+POSTHOOK: query: create database statsdb1
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:statsdb1
+PREHOOK: query: drop database if exists statsdb2
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: drop database if exists statsdb2
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: create database statsdb2
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:statsdb2
+POSTHOOK: query: create database statsdb2
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:statsdb2
+PREHOOK: query: create table statsdb1.testtable1 (col1 int, col2 string, col3 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:statsdb1
+PREHOOK: Output: statsdb1@testtable1
+POSTHOOK: query: create table statsdb1.testtable1 (col1 int, col2 string, col3 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:statsdb1
+POSTHOOK: Output: statsdb1@testtable1
+PREHOOK: query: insert into statsdb1.testtable1 select key, value, 'val3' from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: statsdb1@testtable1
+POSTHOOK: query: insert into statsdb1.testtable1 select key, value, 'val3' from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: statsdb1@testtable1
+POSTHOOK: Lineage: testtable1.col1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: testtable1.col2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: testtable1.col3 SIMPLE []
+PREHOOK: query: create table statsdb1.testpart1 (col1 int, col2 string, col3 string) partitioned by (part string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:statsdb1
+PREHOOK: Output: statsdb1@testpart1
+POSTHOOK: query: create table statsdb1.testpart1 (col1 int, col2 string, col3 string) partitioned by (part string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:statsdb1
+POSTHOOK: Output: statsdb1@testpart1
+PREHOOK: query: insert into statsdb1.testpart1 partition (part = 'part1') select key, value, 'val3' from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: statsdb1@testpart1@part=part1
+POSTHOOK: query: insert into statsdb1.testpart1 partition (part = 'part1') select key, value, 'val3' from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: statsdb1@testpart1@part=part1
+POSTHOOK: Lineage: testpart1 PARTITION(part=part1).col1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: testpart1 PARTITION(part=part1).col2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: testpart1 PARTITION(part=part1).col3 SIMPLE []
+PREHOOK: query: insert into statsdb1.testpart1 partition (part = 'part2') select key, value, 'val3' from src limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: statsdb1@testpart1@part=part2
+POSTHOOK: query: insert into statsdb1.testpart1 partition (part = 'part2') select key, value, 'val3' from src limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: statsdb1@testpart1@part=part2
+POSTHOOK: Lineage: testpart1 PARTITION(part=part2).col1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: testpart1 PARTITION(part=part2).col2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: testpart1 PARTITION(part=part2).col3 SIMPLE []
+PREHOOK: query: use statsdb1
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:statsdb1
+POSTHOOK: query: use statsdb1
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:statsdb1
+PREHOOK: query: analyze table testtable1 compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: statsdb1@testtable1
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table testtable1 compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: statsdb1@testtable1
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted statsdb1.testtable1 col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	8                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col3                	string              	                    	                    	0                   	1                   	4.0                 	4                   	                    	                    	from deserializer   
+PREHOOK: query: alter table testtable1 replace columns (col1 int, col2 string, col4 string)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: statsdb1@testtable1
+PREHOOK: Output: statsdb1@testtable1
+POSTHOOK: query: alter table testtable1 replace columns (col1 int, col2 string, col4 string)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: statsdb1@testtable1
+POSTHOOK: Output: statsdb1@testtable1
+PREHOOK: query: describe formatted statsdb1.testtable1 col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	8                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col4
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col4
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col4                	string              	                    	                    	                    	                    	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: alter table testtable1 change col1 col1 string
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: statsdb1@testtable1
+PREHOOK: Output: statsdb1@testtable1
+POSTHOOK: query: alter table testtable1 change col1 col1 string
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: statsdb1@testtable1
+POSTHOOK: Output: statsdb1@testtable1
+PREHOOK: query: describe formatted statsdb1.testtable1 col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	string              	                    	                    	                    	                    	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col4
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col4
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col4                	string              	                    	                    	                    	                    	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: alter table statsdb1.testtable1 rename to statsdb2.testtable2
+PREHOOK: type: ALTERTABLE_RENAME
+PREHOOK: Input: statsdb1@testtable1
+PREHOOK: Output: statsdb1@testtable1
+POSTHOOK: query: alter table statsdb1.testtable1 rename to statsdb2.testtable2
+POSTHOOK: type: ALTERTABLE_RENAME
+POSTHOOK: Input: statsdb1@testtable1
+POSTHOOK: Output: statsdb1@testtable1
+POSTHOOK: Output: statsdb2@testtable2
+PREHOOK: query: analyze table testpart1 compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Input: statsdb1@testpart1@part=part1
+PREHOOK: Input: statsdb1@testpart1@part=part2
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table testpart1 compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Input: statsdb1@testpart1@part=part1
+POSTHOOK: Input: statsdb1@testpart1@part=part2
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	8                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col3                	string              	                    	                    	0                   	1                   	4.0                 	4                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	18                  	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	18                  	6.8                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col3                	string              	                    	                    	0                   	1                   	4.0                 	4                   	                    	                    	from deserializer   
+PREHOOK: query: alter table statsdb1.testpart1 partition (part = 'part2') rename to partition (part = 'part3')
+PREHOOK: type: ALTERTABLE_RENAMEPART
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1@part=part2
+POSTHOOK: query: alter table statsdb1.testpart1 partition (part = 'part2') rename to partition (part = 'part3')
+POSTHOOK: type: ALTERTABLE_RENAMEPART
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Input: statsdb1@testpart1@part=part2
+POSTHOOK: Output: statsdb1@testpart1@part=part2
+POSTHOOK: Output: statsdb1@testpart1@part=part3
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	8                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col3                	string              	                    	                    	0                   	1                   	4.0                 	4                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col3                	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: alter table statsdb1.testpart1 replace columns (col1 int, col2 string, col4 string) cascade
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1@part=part1
+PREHOOK: Output: statsdb1@testpart1@part=part3
+POSTHOOK: query: alter table statsdb1.testpart1 replace columns (col1 int, col2 string, col4 string) cascade
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Output: statsdb1@testpart1
+POSTHOOK: Output: statsdb1@testpart1@part=part1
+POSTHOOK: Output: statsdb1@testpart1@part=part3
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	8                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col4
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col4
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col4                	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: alter table statsdb1.testpart1 change column col1 col1 string
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1
+POSTHOOK: query: alter table statsdb1.testpart1 change column col1 col1 string
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Output: statsdb1@testpart1
+PREHOOK: query: alter table statsdb1.testpart1 partition (part) change column col1 col1 string
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1@part=part1
+PREHOOK: Output: statsdb1@testpart1@part=part3
+POSTHOOK: query: alter table statsdb1.testpart1 partition (part) change column col1 col1 string
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Input: statsdb1@testpart1@part=part1
+POSTHOOK: Input: statsdb1@testpart1@part=part3
+POSTHOOK: Output: statsdb1@testpart1@part=part1
+POSTHOOK: Output: statsdb1@testpart1@part=part3
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col1                	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col4
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col4
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col4                	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: alter table statsdb1.testpart1 rename to statsdb2.testpart2
+PREHOOK: type: ALTERTABLE_RENAME
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1
+POSTHOOK: query: alter table statsdb1.testpart1 rename to statsdb2.testpart2
+POSTHOOK: type: ALTERTABLE_RENAME
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Output: statsdb1@testpart1
+POSTHOOK: Output: statsdb2@testpart2
+PREHOOK: query: use statsdb2
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:statsdb2
+POSTHOOK: query: use statsdb2
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:statsdb2
+PREHOOK: query: alter table statsdb2.testpart2 drop partition (part = 'part1')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: statsdb2@testpart2
+PREHOOK: Output: statsdb2@testpart2@part=part1
+POSTHOOK: query: alter table statsdb2.testpart2 drop partition (part = 'part1')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: statsdb2@testpart2
+POSTHOOK: Output: statsdb2@testpart2@part=part1
+PREHOOK: query: drop table statsdb2.testpart2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: statsdb2@testpart2
+PREHOOK: Output: statsdb2@testpart2
+POSTHOOK: query: drop table statsdb2.testpart2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: statsdb2@testpart2
+POSTHOOK: Output: statsdb2@testpart2
+PREHOOK: query: drop table statsdb2.testtable2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: statsdb2@testtable2
+PREHOOK: Output: statsdb2@testtable2
+POSTHOOK: query: drop table statsdb2.testtable2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: statsdb2@testtable2
+POSTHOOK: Output: statsdb2@testtable2
+PREHOOK: query: use default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: use default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: drop database statsdb1
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:statsdb1
+PREHOOK: Output: database:statsdb1
+POSTHOOK: query: drop database statsdb1
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:statsdb1
+POSTHOOK: Output: database:statsdb1
+PREHOOK: query: drop database statsdb2
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:statsdb2
+PREHOOK: Output: database:statsdb2
+POSTHOOK: query: drop database statsdb2
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:statsdb2
+POSTHOOK: Output: database:statsdb2
+PREHOOK: query: drop database if exists statsdb1
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: drop database if exists statsdb1
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: create database statsdb1
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:statsdb1
+POSTHOOK: query: create database statsdb1
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:statsdb1
+PREHOOK: query: drop database if exists statsdb2
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: drop database if exists statsdb2
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: create database statsdb2
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:statsdb2
+POSTHOOK: query: create database statsdb2
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:statsdb2
+PREHOOK: query: create table statsdb1.testtable1 (col1 int, col2 string, col3 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:statsdb1
+PREHOOK: Output: statsdb1@testtable1
+POSTHOOK: query: create table statsdb1.testtable1 (col1 int, col2 string, col3 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:statsdb1
+POSTHOOK: Output: statsdb1@testtable1
+PREHOOK: query: insert into statsdb1.testtable1 select key, value, 'val3' from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: statsdb1@testtable1
+POSTHOOK: query: insert into statsdb1.testtable1 select key, value, 'val3' from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: statsdb1@testtable1
+POSTHOOK: Lineage: testtable1.col1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: testtable1.col2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: testtable1.col3 SIMPLE []
+PREHOOK: query: create table statsdb1.testpart1 (col1 int, col2 string, col3 string) partitioned by (part string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:statsdb1
+PREHOOK: Output: statsdb1@testpart1
+POSTHOOK: query: create table statsdb1.testpart1 (col1 int, col2 string, col3 string) partitioned by (part string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:statsdb1
+POSTHOOK: Output: statsdb1@testpart1
+PREHOOK: query: insert into statsdb1.testpart1 partition (part = 'part1') select key, value, 'val3' from src limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: statsdb1@testpart1@part=part1
+POSTHOOK: query: insert into statsdb1.testpart1 partition (part = 'part1') select key, value, 'val3' from src limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: statsdb1@testpart1@part=part1
+POSTHOOK: Lineage: testpart1 PARTITION(part=part1).col1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: testpart1 PARTITION(part=part1).col2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: testpart1 PARTITION(part=part1).col3 SIMPLE []
+PREHOOK: query: insert into statsdb1.testpart1 partition (part = 'part2') select key, value, 'val3' from src limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: statsdb1@testpart1@part=part2
+POSTHOOK: query: insert into statsdb1.testpart1 partition (part = 'part2') select key, value, 'val3' from src limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: statsdb1@testpart1@part=part2
+POSTHOOK: Lineage: testpart1 PARTITION(part=part2).col1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: testpart1 PARTITION(part=part2).col2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: testpart1 PARTITION(part=part2).col3 SIMPLE []
+PREHOOK: query: use statsdb1
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:statsdb1
+POSTHOOK: query: use statsdb1
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:statsdb1
+PREHOOK: query: analyze table testtable1 compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: statsdb1@testtable1
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table testtable1 compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: statsdb1@testtable1
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted statsdb1.testtable1 col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	8                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col3                	string              	                    	                    	0                   	1                   	4.0                 	4                   	                    	                    	from deserializer   
+PREHOOK: query: alter table testtable1 replace columns (col1 int, col2 string, col4 string)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: statsdb1@testtable1
+PREHOOK: Output: statsdb1@testtable1
+POSTHOOK: query: alter table testtable1 replace columns (col1 int, col2 string, col4 string)
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: statsdb1@testtable1
+POSTHOOK: Output: statsdb1@testtable1
+PREHOOK: query: describe formatted statsdb1.testtable1 col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	8                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col4
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col4
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col4                	string              	                    	                    	                    	                    	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: alter table testtable1 change col1 col1 string
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: statsdb1@testtable1
+PREHOOK: Output: statsdb1@testtable1
+POSTHOOK: query: alter table testtable1 change col1 col1 string
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: statsdb1@testtable1
+POSTHOOK: Output: statsdb1@testtable1
+PREHOOK: query: describe formatted statsdb1.testtable1 col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	string              	                    	                    	                    	                    	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testtable1 col4
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testtable1
+POSTHOOK: query: describe formatted statsdb1.testtable1 col4
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testtable1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col4                	string              	                    	                    	                    	                    	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: alter table statsdb1.testtable1 rename to statsdb2.testtable2
+PREHOOK: type: ALTERTABLE_RENAME
+PREHOOK: Input: statsdb1@testtable1
+PREHOOK: Output: statsdb1@testtable1
+POSTHOOK: query: alter table statsdb1.testtable1 rename to statsdb2.testtable2
+POSTHOOK: type: ALTERTABLE_RENAME
+POSTHOOK: Input: statsdb1@testtable1
+POSTHOOK: Output: statsdb1@testtable1
+POSTHOOK: Output: statsdb2@testtable2
+PREHOOK: query: analyze table testpart1 compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Input: statsdb1@testpart1@part=part1
+PREHOOK: Input: statsdb1@testpart1@part=part2
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table testpart1 compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Input: statsdb1@testpart1@part=part1
+POSTHOOK: Input: statsdb1@testpart1@part=part2
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	8                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col3                	string              	                    	                    	0                   	1                   	4.0                 	4                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	18                  	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	18                  	6.8                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part2') col3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col3                	string              	                    	                    	0                   	1                   	4.0                 	4                   	                    	                    	from deserializer   
+PREHOOK: query: alter table statsdb1.testpart1 partition (part = 'part2') rename to partition (part = 'part3')
+PREHOOK: type: ALTERTABLE_RENAMEPART
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1@part=part2
+POSTHOOK: query: alter table statsdb1.testpart1 partition (part = 'part2') rename to partition (part = 'part3')
+POSTHOOK: type: ALTERTABLE_RENAMEPART
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Input: statsdb1@testpart1@part=part2
+POSTHOOK: Output: statsdb1@testpart1@part=part2
+POSTHOOK: Output: statsdb1@testpart1@part=part3
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	8                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col3                	string              	                    	                    	0                   	1                   	4.0                 	4                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part3') col3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col3                	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: alter table statsdb1.testpart1 replace columns (col1 int, col2 string, col4 string) cascade
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1@part=part1
+PREHOOK: Output: statsdb1@testpart1@part=part3
+POSTHOOK: query: alter table statsdb1.testpart1 replace columns (col1 int, col2 string, col4 string) cascade
+POSTHOOK: type: ALTERTABLE_REPLACECOLS
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Output: statsdb1@testpart1
+POSTHOOK: Output: statsdb1@testpart1@part=part1
+POSTHOOK: Output: statsdb1@testpart1@part=part3
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col1                	int                 	27                  	484                 	0                   	8                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col4
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col4
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col4                	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: alter table statsdb1.testpart1 change column col1 col1 string
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1
+POSTHOOK: query: alter table statsdb1.testpart1 change column col1 col1 string
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Output: statsdb1@testpart1
+PREHOOK: query: alter table statsdb1.testpart1 partition (part) change column col1 col1 string
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1@part=part1
+PREHOOK: Output: statsdb1@testpart1@part=part3
+POSTHOOK: query: alter table statsdb1.testpart1 partition (part) change column col1 col1 string
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Input: statsdb1@testpart1@part=part1
+POSTHOOK: Input: statsdb1@testpart1@part=part3
+POSTHOOK: Output: statsdb1@testpart1@part=part1
+POSTHOOK: Output: statsdb1@testpart1@part=part3
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col1                	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+col2                	string              	                    	                    	0                   	12                  	6.7                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col4
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: statsdb1@testpart1
+POSTHOOK: query: describe formatted statsdb1.testpart1 partition (part = 'part1') col4
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: statsdb1@testpart1
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+col4                	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: alter table statsdb1.testpart1 rename to statsdb2.testpart2
+PREHOOK: type: ALTERTABLE_RENAME
+PREHOOK: Input: statsdb1@testpart1
+PREHOOK: Output: statsdb1@testpart1
+POSTHOOK: query: alter table statsdb1.testpart1 rename to statsdb2.testpart2
+POSTHOOK: type: ALTERTABLE_RENAME
+POSTHOOK: Input: statsdb1@testpart1
+POSTHOOK: Output: statsdb1@testpart1
+POSTHOOK: Output: statsdb2@testpart2
+PREHOOK: query: use statsdb2
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:statsdb2
+POSTHOOK: query: use statsdb2
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:statsdb2
+PREHOOK: query: alter table statsdb2.testpart2 drop partition (part = 'part1')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: statsdb2@testpart2
+PREHOOK: Output: statsdb2@testpart2@part=part1
+POSTHOOK: query: alter table statsdb2.testpart2 drop partition (part = 'part1')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: statsdb2@testpart2
+POSTHOOK: Output: statsdb2@testpart2@part=part1
+PREHOOK: query: drop table statsdb2.testpart2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: statsdb2@testpart2
+PREHOOK: Output: statsdb2@testpart2
+POSTHOOK: query: drop table statsdb2.testpart2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: statsdb2@testpart2
+POSTHOOK: Output: statsdb2@testpart2
+PREHOOK: query: drop table statsdb2.testtable2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: statsdb2@testtable2
+PREHOOK: Output: statsdb2@testtable2
+POSTHOOK: query: drop table statsdb2.testtable2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: statsdb2@testtable2
+POSTHOOK: Output: statsdb2@testtable2
+PREHOOK: query: use default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: use default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: drop database statsdb1
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:statsdb1
+PREHOOK: Output: database:statsdb1
+POSTHOOK: query: drop database statsdb1
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:statsdb1
+POSTHOOK: Output: database:statsdb1
+PREHOOK: query: drop database statsdb2
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:statsdb2
+PREHOOK: Output: database:statsdb2
+POSTHOOK: query: drop database statsdb2
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:statsdb2
+POSTHOOK: Output: database:statsdb2


[04/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/smb_mapjoin_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_5.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_5.q.out
new file mode 100644
index 0000000..152c3e0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_5.q.out
@@ -0,0 +1,1433 @@
+PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket_1
+POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket_1
+PREHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket_2
+POSTHOOK: query: create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket_2
+PREHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket_3
+POSTHOOK: query: create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket_3
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_bucket_1
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_bucket_1
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_bucket_2
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_bucket_2
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_bucket_3
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_bucket_3
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                     Left Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+1	val_1	NULL	NULL	NULL	NULL
+10	val_10	NULL	NULL	NULL	NULL
+3	val_3	NULL	NULL	NULL	NULL
+4	val_4	NULL	NULL	NULL	NULL
+5	val_5	NULL	NULL	NULL	NULL
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                     Right Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	20	val_20
+NULL	NULL	NULL	NULL	23	val_23
+NULL	NULL	NULL	NULL	4	val_4
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                     Outer Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+1	val_1	NULL	NULL	NULL	NULL
+10	val_10	NULL	NULL	NULL	NULL
+3	val_3	NULL	NULL	NULL	NULL
+4	val_4	NULL	NULL	NULL	NULL
+5	val_5	NULL	NULL	NULL	NULL
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	20	val_20
+NULL	NULL	NULL	NULL	23	val_23
+NULL	NULL	NULL	NULL	4	val_4
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                     Left Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	25	val_25	NULL	NULL
+NULL	NULL	30	val_30	NULL	NULL
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                     Right Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	4	val_4
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                     Outer Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a right outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	25	val_25	NULL	NULL
+NULL	NULL	30	val_30	NULL	NULL
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	4	val_4
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Left Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key left outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+1	val_1	NULL	NULL	NULL	NULL
+10	val_10	NULL	NULL	NULL	NULL
+3	val_3	NULL	NULL	NULL	NULL
+4	val_4	NULL	NULL	NULL	NULL
+5	val_5	NULL	NULL	NULL	NULL
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	25	val_25	NULL	NULL
+NULL	NULL	30	val_30	NULL	NULL
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Right Outer Join1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key right outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	4	val_4
+PREHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 2 Data size: 222 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Outer Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 4 Data size: 457 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket_1
+PREHOOK: Input: default@smb_bucket_2
+PREHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a,c)*/ * from smb_bucket_1 a full outer join smb_bucket_2 b on a.key = b.key full outer join smb_bucket_3 c on b.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket_1
+POSTHOOK: Input: default@smb_bucket_2
+POSTHOOK: Input: default@smb_bucket_3
+#### A masked pattern was here ####
+1	val_1	NULL	NULL	NULL	NULL
+10	val_10	NULL	NULL	NULL	NULL
+3	val_3	NULL	NULL	NULL	NULL
+4	val_4	NULL	NULL	NULL	NULL
+5	val_5	NULL	NULL	NULL	NULL
+NULL	NULL	20	val_20	20	val_20
+NULL	NULL	23	val_23	23	val_23
+NULL	NULL	25	val_25	NULL	NULL
+NULL	NULL	30	val_30	NULL	NULL
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	4	val_4


[11/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/ppd_union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/ppd_union_view.q.out b/ql/src/test/results/clientpositive/llap/ppd_union_view.q.out
new file mode 100644
index 0000000..fa7abcb
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/ppd_union_view.q.out
@@ -0,0 +1,686 @@
+PREHOOK: query: -- test predicate pushdown on a view with a union
+
+drop view v
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: -- test predicate pushdown on a view with a union
+
+drop view v
+POSTHOOK: type: DROPVIEW
+PREHOOK: query: create table t1_new (key string, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1_new
+POSTHOOK: query: create table t1_new (key string, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1_new
+PREHOOK: query: insert overwrite table t1_new partition (ds = '2011-10-15')
+select 'key1', 'value1' from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1_new@ds=2011-10-15
+POSTHOOK: query: insert overwrite table t1_new partition (ds = '2011-10-15')
+select 'key1', 'value1' from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1_new@ds=2011-10-15
+POSTHOOK: Lineage: t1_new PARTITION(ds=2011-10-15).key SIMPLE []
+POSTHOOK: Lineage: t1_new PARTITION(ds=2011-10-15).value SIMPLE []
+PREHOOK: query: insert overwrite table t1_new partition (ds = '2011-10-16')
+select 'key2', 'value2' from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1_new@ds=2011-10-16
+POSTHOOK: query: insert overwrite table t1_new partition (ds = '2011-10-16')
+select 'key2', 'value2' from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1_new@ds=2011-10-16
+POSTHOOK: Lineage: t1_new PARTITION(ds=2011-10-16).key SIMPLE []
+POSTHOOK: Lineage: t1_new PARTITION(ds=2011-10-16).value SIMPLE []
+PREHOOK: query: create table t1_old (keymap string, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1_old
+POSTHOOK: query: create table t1_old (keymap string, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1_old
+PREHOOK: query: insert overwrite table t1_old partition (ds = '2011-10-13')
+select 'keymap3', 'value3' from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1_old@ds=2011-10-13
+POSTHOOK: query: insert overwrite table t1_old partition (ds = '2011-10-13')
+select 'keymap3', 'value3' from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1_old@ds=2011-10-13
+POSTHOOK: Lineage: t1_old PARTITION(ds=2011-10-13).keymap SIMPLE []
+POSTHOOK: Lineage: t1_old PARTITION(ds=2011-10-13).value SIMPLE []
+PREHOOK: query: insert overwrite table t1_old partition (ds = '2011-10-14')
+select 'keymap4', 'value4' from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1_old@ds=2011-10-14
+POSTHOOK: query: insert overwrite table t1_old partition (ds = '2011-10-14')
+select 'keymap4', 'value4' from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1_old@ds=2011-10-14
+POSTHOOK: Lineage: t1_old PARTITION(ds=2011-10-14).keymap SIMPLE []
+POSTHOOK: Lineage: t1_old PARTITION(ds=2011-10-14).value SIMPLE []
+PREHOOK: query: create table t1_mapping (key string, keymap string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1_mapping
+POSTHOOK: query: create table t1_mapping (key string, keymap string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1_mapping
+PREHOOK: query: insert overwrite table t1_mapping partition (ds = '2011-10-13')
+select 'key3', 'keymap3' from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1_mapping@ds=2011-10-13
+POSTHOOK: query: insert overwrite table t1_mapping partition (ds = '2011-10-13')
+select 'key3', 'keymap3' from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1_mapping@ds=2011-10-13
+POSTHOOK: Lineage: t1_mapping PARTITION(ds=2011-10-13).key SIMPLE []
+POSTHOOK: Lineage: t1_mapping PARTITION(ds=2011-10-13).keymap SIMPLE []
+PREHOOK: query: insert overwrite table t1_mapping partition (ds = '2011-10-14')
+select 'key4', 'keymap4' from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1_mapping@ds=2011-10-14
+POSTHOOK: query: insert overwrite table t1_mapping partition (ds = '2011-10-14')
+select 'key4', 'keymap4' from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1_mapping@ds=2011-10-14
+POSTHOOK: Lineage: t1_mapping PARTITION(ds=2011-10-14).key SIMPLE []
+POSTHOOK: Lineage: t1_mapping PARTITION(ds=2011-10-14).keymap SIMPLE []
+PREHOOK: query: create view t1 partitioned on (ds) as
+select * from
+(
+select key, value, ds from t1_new
+union all
+select key, value, t1_old.ds from t1_old join t1_mapping
+on t1_old.keymap = t1_mapping.keymap and
+   t1_old.ds = t1_mapping.ds
+) subq
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@t1_mapping
+PREHOOK: Input: default@t1_new
+PREHOOK: Input: default@t1_old
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: create view t1 partitioned on (ds) as
+select * from
+(
+select key, value, ds from t1_new
+union all
+select key, value, t1_old.ds from t1_old join t1_mapping
+on t1_old.keymap = t1_mapping.keymap and
+   t1_old.ds = t1_mapping.ds
+) subq
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@t1_mapping
+POSTHOOK: Input: default@t1_new
+POSTHOOK: Input: default@t1_old
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: explain extended
+select * from t1 where ds = '2011-10-13'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select * from t1 where ds = '2011-10-13'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Reducer 4 <- Map 3 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE), Union 2 (CONTAINS)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1_new
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: PARTIAL
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (ds = '2011-10-13') (type: boolean)
+                    Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                      Select Operator
+                        expressions: _col0 (type: string), _col1 (type: string), '2011-10-13' (type: string)
+                        outputColumnNames: _col0, _col1, _col2
+                        Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+                        File Output Operator
+                          compressed: false
+                          GlobalTableId: 0
+#### A masked pattern was here ####
+                          NumFilesPerFileSink: 1
+                          Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+#### A masked pattern was here ####
+                          table:
+                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                              properties:
+                                columns _col0,_col1,_col2
+                                columns.types string:string:string
+                                escape.delim \
+                                hive.serialization.extend.additional.nesting.levels true
+                                serialization.escape.crlf true
+                                serialization.format 1
+                                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          TotalFiles: 1
+                          GatherStats: false
+                          MultiFileSpray: false
+            Execution mode: llap
+            LLAP IO: unknown
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t1_old
+                  properties:
+                    insideView TRUE
+                  Statistics: Num rows: 1 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: keymap is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: keymap (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+                        tag: 0
+                        value expressions: _col1 (type: string)
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2011-10-13
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2011-10-13
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    bucket_count -1
+                    columns keymap,value
+                    columns.comments 
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.t1_old
+                    numFiles 1
+                    numRows 1
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 14
+                    serialization.ddl struct t1_old { string keymap, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 15
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns keymap,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.t1_old
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct t1_old { string keymap, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.t1_old
+                  name: default.t1_old
+            Truncated Path -> Alias:
+              /t1_old/ds=2011-10-13 [t1_old]
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: t1_mapping
+                  properties:
+                    insideView TRUE
+                  Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: keymap is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), keymap (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: string)
+                        Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+                        tag: 1
+                        value expressions: _col0 (type: string)
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2011-10-13
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2011-10-13
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    bucket_count -1
+                    columns key,keymap
+                    columns.comments 
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.t1_mapping
+                    numFiles 1
+                    numRows 1
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 12
+                    serialization.ddl struct t1_mapping { string key, string keymap}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 13
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,keymap
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.t1_mapping
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct t1_mapping { string key, string keymap}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.t1_mapping
+                  name: default.t1_mapping
+            Truncated Path -> Alias:
+              /t1_mapping/ds=2011-10-13 [t1_mapping]
+        Reducer 4 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col1 (type: string)
+                outputColumnNames: _col1, _col3
+                Position of Big Table: 0
+                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col3 (type: string), _col1 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: string), '2011-10-13' (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      GlobalTableId: 0
+#### A masked pattern was here ####
+                      NumFilesPerFileSink: 1
+                      Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+#### A masked pattern was here ####
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          properties:
+                            columns _col0,_col1,_col2
+                            columns.types string:string:string
+                            escape.delim \
+                            hive.serialization.extend.additional.nesting.levels true
+                            serialization.escape.crlf true
+                            serialization.format 1
+                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      TotalFiles: 1
+                      GatherStats: false
+                      MultiFileSpray: false
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 where ds = '2011-10-13'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_mapping
+PREHOOK: Input: default@t1_mapping@ds=2011-10-13
+PREHOOK: Input: default@t1_new
+PREHOOK: Input: default@t1_old
+PREHOOK: Input: default@t1_old@ds=2011-10-13
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 where ds = '2011-10-13'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_mapping
+POSTHOOK: Input: default@t1_mapping@ds=2011-10-13
+POSTHOOK: Input: default@t1_new
+POSTHOOK: Input: default@t1_old
+POSTHOOK: Input: default@t1_old@ds=2011-10-13
+#### A masked pattern was here ####
+key3	value3	2011-10-13
+PREHOOK: query: select * from t1 where ds = '2011-10-14'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_mapping
+PREHOOK: Input: default@t1_mapping@ds=2011-10-14
+PREHOOK: Input: default@t1_new
+PREHOOK: Input: default@t1_old
+PREHOOK: Input: default@t1_old@ds=2011-10-14
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 where ds = '2011-10-14'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_mapping
+POSTHOOK: Input: default@t1_mapping@ds=2011-10-14
+POSTHOOK: Input: default@t1_new
+POSTHOOK: Input: default@t1_old
+POSTHOOK: Input: default@t1_old@ds=2011-10-14
+#### A masked pattern was here ####
+key4	value4	2011-10-14
+PREHOOK: query: explain extended
+select * from t1 where ds = '2011-10-15'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select * from t1 where ds = '2011-10-15'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Union 2 (CONTAINS)
+        Reducer 4 <- Map 3 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE), Union 2 (CONTAINS)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1_new
+                  Statistics: Num rows: 1 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: string), _col1 (type: string), '2011-10-15' (type: string)
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+                      File Output Operator
+                        compressed: false
+                        GlobalTableId: 0
+#### A masked pattern was here ####
+                        NumFilesPerFileSink: 1
+                        Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+#### A masked pattern was here ####
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            properties:
+                              columns _col0,_col1,_col2
+                              columns.types string:string:string
+                              escape.delim \
+                              hive.serialization.extend.additional.nesting.levels true
+                              serialization.escape.crlf true
+                              serialization.format 1
+                              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        TotalFiles: 1
+                        GatherStats: false
+                        MultiFileSpray: false
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2011-10-15
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2011-10-15
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.t1_new
+                    numFiles 1
+                    numRows 1
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 11
+                    serialization.ddl struct t1_new { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 12
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.t1_new
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct t1_new { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.t1_new
+                  name: default.t1_new
+            Truncated Path -> Alias:
+              /t1_new/ds=2011-10-15 [t1_new]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: t1_old
+                  properties:
+                    insideView TRUE
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: PARTIAL
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: ((ds = '2011-10-15') and keymap is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: keymap (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                        tag: 0
+                        value expressions: _col1 (type: string)
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: unknown
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: t1_mapping
+                  properties:
+                    insideView TRUE
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: PARTIAL
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (('2011-10-15' = ds) and keymap is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: PARTIAL
+                    Select Operator
+                      expressions: key (type: string), keymap (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: _col1 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: string)
+                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                        tag: 1
+                        value expressions: _col0 (type: string)
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: unknown
+        Reducer 4 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col1 (type: string)
+                outputColumnNames: _col1, _col3
+                Position of Big Table: 0
+                Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: PARTIAL
+                Select Operator
+                  expressions: _col3 (type: string), _col1 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
+                  Select Operator
+                    expressions: _col0 (type: string), _col1 (type: string), '2011-10-15' (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      GlobalTableId: 0
+#### A masked pattern was here ####
+                      NumFilesPerFileSink: 1
+                      Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: PARTIAL
+#### A masked pattern was here ####
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          properties:
+                            columns _col0,_col1,_col2
+                            columns.types string:string:string
+                            escape.delim \
+                            hive.serialization.extend.additional.nesting.levels true
+                            serialization.escape.crlf true
+                            serialization.format 1
+                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      TotalFiles: 1
+                      GatherStats: false
+                      MultiFileSpray: false
+        Union 2 
+            Vertex: Union 2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 where ds = '2011-10-15'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_mapping
+PREHOOK: Input: default@t1_new
+PREHOOK: Input: default@t1_new@ds=2011-10-15
+PREHOOK: Input: default@t1_old
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 where ds = '2011-10-15'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_mapping
+POSTHOOK: Input: default@t1_new
+POSTHOOK: Input: default@t1_new@ds=2011-10-15
+POSTHOOK: Input: default@t1_old
+#### A masked pattern was here ####
+key1	value1	2011-10-15
+PREHOOK: query: select * from t1 where ds = '2011-10-16'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1_mapping
+PREHOOK: Input: default@t1_new
+PREHOOK: Input: default@t1_new@ds=2011-10-16
+PREHOOK: Input: default@t1_old
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 where ds = '2011-10-16'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1_mapping
+POSTHOOK: Input: default@t1_new
+POSTHOOK: Input: default@t1_new@ds=2011-10-16
+POSTHOOK: Input: default@t1_old
+#### A masked pattern was here ####
+key2	value2	2011-10-16

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/ppr_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/ppr_pushdown.q.out b/ql/src/test/results/clientpositive/llap/ppr_pushdown.q.out
new file mode 100644
index 0000000..5c59810
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/ppr_pushdown.q.out
@@ -0,0 +1,328 @@
+PREHOOK: query: create table ppr_test (key string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ppr_test
+POSTHOOK: query: create table ppr_test (key string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ppr_test
+PREHOOK: query: alter table ppr_test add partition (ds = '1234')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@ppr_test
+POSTHOOK: query: alter table ppr_test add partition (ds = '1234')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@ppr_test
+POSTHOOK: Output: default@ppr_test@ds=1234
+PREHOOK: query: alter table ppr_test add partition (ds = '1224')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@ppr_test
+POSTHOOK: query: alter table ppr_test add partition (ds = '1224')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@ppr_test
+POSTHOOK: Output: default@ppr_test@ds=1224
+PREHOOK: query: alter table ppr_test add partition (ds = '1214')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@ppr_test
+POSTHOOK: query: alter table ppr_test add partition (ds = '1214')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@ppr_test
+POSTHOOK: Output: default@ppr_test@ds=1214
+PREHOOK: query: alter table ppr_test add partition (ds = '12+4')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@ppr_test
+POSTHOOK: query: alter table ppr_test add partition (ds = '12+4')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@ppr_test
+POSTHOOK: Output: default@ppr_test@ds=12+4
+PREHOOK: query: alter table ppr_test add partition (ds = '12.4')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@ppr_test
+POSTHOOK: query: alter table ppr_test add partition (ds = '12.4')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@ppr_test
+POSTHOOK: Output: default@ppr_test@ds=12.4
+PREHOOK: query: alter table ppr_test add partition (ds = '12:4')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@ppr_test
+POSTHOOK: query: alter table ppr_test add partition (ds = '12:4')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@ppr_test
+POSTHOOK: Output: default@ppr_test@ds=12%3A4
+PREHOOK: query: alter table ppr_test add partition (ds = '12%4')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@ppr_test
+POSTHOOK: query: alter table ppr_test add partition (ds = '12%4')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@ppr_test
+POSTHOOK: Output: default@ppr_test@ds=12%254
+PREHOOK: query: alter table ppr_test add partition (ds = '12*4')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@ppr_test
+POSTHOOK: query: alter table ppr_test add partition (ds = '12*4')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@ppr_test
+POSTHOOK: Output: default@ppr_test@ds=12%2A4
+PREHOOK: query: insert overwrite table ppr_test partition(ds = '1234') select * from (select '1234' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@ppr_test@ds=1234
+POSTHOOK: query: insert overwrite table ppr_test partition(ds = '1234') select * from (select '1234' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@ppr_test@ds=1234
+POSTHOOK: Lineage: ppr_test PARTITION(ds=1234).key EXPRESSION []
+PREHOOK: query: insert overwrite table ppr_test partition(ds = '1224') select * from (select '1224' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@ppr_test@ds=1224
+POSTHOOK: query: insert overwrite table ppr_test partition(ds = '1224') select * from (select '1224' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@ppr_test@ds=1224
+POSTHOOK: Lineage: ppr_test PARTITION(ds=1224).key EXPRESSION []
+PREHOOK: query: insert overwrite table ppr_test partition(ds = '1214') select * from (select '1214' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@ppr_test@ds=1214
+POSTHOOK: query: insert overwrite table ppr_test partition(ds = '1214') select * from (select '1214' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@ppr_test@ds=1214
+POSTHOOK: Lineage: ppr_test PARTITION(ds=1214).key EXPRESSION []
+PREHOOK: query: insert overwrite table ppr_test partition(ds = '12+4') select * from (select '12+4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@ppr_test@ds=12+4
+POSTHOOK: query: insert overwrite table ppr_test partition(ds = '12+4') select * from (select '12+4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@ppr_test@ds=12+4
+POSTHOOK: Lineage: ppr_test PARTITION(ds=12+4).key EXPRESSION []
+PREHOOK: query: insert overwrite table ppr_test partition(ds = '12.4') select * from (select '12.4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@ppr_test@ds=12.4
+POSTHOOK: query: insert overwrite table ppr_test partition(ds = '12.4') select * from (select '12.4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@ppr_test@ds=12.4
+POSTHOOK: Lineage: ppr_test PARTITION(ds=12.4).key EXPRESSION []
+PREHOOK: query: insert overwrite table ppr_test partition(ds = '12:4') select * from (select '12:4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@ppr_test@ds=12%3A4
+POSTHOOK: query: insert overwrite table ppr_test partition(ds = '12:4') select * from (select '12:4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@ppr_test@ds=12%3A4
+POSTHOOK: Lineage: ppr_test PARTITION(ds=12:4).key EXPRESSION []
+PREHOOK: query: insert overwrite table ppr_test partition(ds = '12%4') select * from (select '12%4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@ppr_test@ds=12%254
+POSTHOOK: query: insert overwrite table ppr_test partition(ds = '12%4') select * from (select '12%4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@ppr_test@ds=12%254
+POSTHOOK: Lineage: ppr_test PARTITION(ds=12%4).key EXPRESSION []
+PREHOOK: query: insert overwrite table ppr_test partition(ds = '12*4') select * from (select '12*4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@ppr_test@ds=12%2A4
+POSTHOOK: query: insert overwrite table ppr_test partition(ds = '12*4') select * from (select '12*4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@ppr_test@ds=12%2A4
+POSTHOOK: Lineage: ppr_test PARTITION(ds=12*4).key EXPRESSION []
+PREHOOK: query: select * from ppr_test where ds = '1234' order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=1234
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '1234' order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=1234
+#### A masked pattern was here ####
+1234	1234
+abcd	1234
+PREHOOK: query: select * from ppr_test where ds = '1224' order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=1224
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '1224' order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=1224
+#### A masked pattern was here ####
+1224	1224
+abcd	1224
+PREHOOK: query: select * from ppr_test where ds = '1214' order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=1214
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '1214' order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=1214
+#### A masked pattern was here ####
+1214	1214
+abcd	1214
+PREHOOK: query: select * from ppr_test where ds = '12.4' order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=12.4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12.4' order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=12.4
+#### A masked pattern was here ####
+12.4	12.4
+abcd	12.4
+PREHOOK: query: select * from ppr_test where ds = '12+4' order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=12+4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12+4' order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=12+4
+#### A masked pattern was here ####
+12+4	12+4
+abcd	12+4
+PREHOOK: query: select * from ppr_test where ds = '12:4' order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=12%3A4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12:4' order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=12%3A4
+#### A masked pattern was here ####
+12:4	12:4
+abcd	12:4
+PREHOOK: query: select * from ppr_test where ds = '12%4' order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=12%254
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12%4' order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=12%254
+#### A masked pattern was here ####
+12%4	12%4
+abcd	12%4
+PREHOOK: query: select * from ppr_test where ds = '12*4' order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=12%2A4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12*4' order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=12%2A4
+#### A masked pattern was here ####
+12*4	12*4
+abcd	12*4
+PREHOOK: query: select * from ppr_test where ds = '12.*4' order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12.*4' order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+#### A masked pattern was here ####
+PREHOOK: query: select * from ppr_test where ds = '1234' and key = '1234'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=1234
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '1234' and key = '1234'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=1234
+#### A masked pattern was here ####
+1234	1234
+PREHOOK: query: select * from ppr_test where ds = '1224' and key = '1224'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=1224
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '1224' and key = '1224'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=1224
+#### A masked pattern was here ####
+1224	1224
+PREHOOK: query: select * from ppr_test where ds = '1214' and key = '1214'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=1214
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '1214' and key = '1214'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=1214
+#### A masked pattern was here ####
+1214	1214
+PREHOOK: query: select * from ppr_test where ds = '12.4' and key = '12.4'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=12.4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12.4' and key = '12.4'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=12.4
+#### A masked pattern was here ####
+12.4	12.4
+PREHOOK: query: select * from ppr_test where ds = '12+4' and key = '12+4'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=12+4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12+4' and key = '12+4'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=12+4
+#### A masked pattern was here ####
+12+4	12+4
+PREHOOK: query: select * from ppr_test where ds = '12:4' and key = '12:4'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=12%3A4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12:4' and key = '12:4'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=12%3A4
+#### A masked pattern was here ####
+12:4	12:4
+PREHOOK: query: select * from ppr_test where ds = '12%4' and key = '12%4'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=12%254
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12%4' and key = '12%4'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=12%254
+#### A masked pattern was here ####
+12%4	12%4
+PREHOOK: query: select * from ppr_test where ds = '12*4' and key = '12*4'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ppr_test
+PREHOOK: Input: default@ppr_test@ds=12%2A4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ppr_test where ds = '12*4' and key = '12*4'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ppr_test
+POSTHOOK: Input: default@ppr_test@ds=12%2A4
+#### A masked pattern was here ####
+12*4	12*4


[31/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out
new file mode 100644
index 0000000..84de3e3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out
@@ -0,0 +1,1234 @@
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+PREHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part_2
+                    numFiles 2
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 3062
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part_2
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part_2
+                  name: default.srcbucket_mapjoin_part_2
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part_2/ds=2008-04-08 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col7
+                Position of Big Table: 0
+                Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 0
+                          numRows 0
+                          rawDataSize 0
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 0
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 0
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+564
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+564
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 29 Data size: 3294 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part_2
+                    numFiles 2
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 3062
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part_2
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part_2
+                  name: default.srcbucket_mapjoin_part_2
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part_2/ds=2008-04-08 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col7
+                Position of Big Table: 0
+                Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 1
+                          numRows 564
+                          rawDataSize 10503
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 11067
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 1
+                numRows 564
+                rawDataSize 10503
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 11067
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+564
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key and b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+564
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0
+PREHOOK: query: -- HIVE-3210
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: -- HIVE-3210
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-09
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 58 Data size: 6588 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 58 Data size: 6588 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 58 Data size: 6588 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part_2
+                    numFiles 2
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 3062
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part_2
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part_2
+                  name: default.srcbucket_mapjoin_part_2
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-09
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-09
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part_2
+                    numFiles 2
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 3062
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part_2
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part_2 { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part_2
+                  name: default.srcbucket_mapjoin_part_2
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part_2/ds=2008-04-08 [b]
+              /srcbucket_mapjoin_part_2/ds=2008-04-09 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col7
+                Position of Big Table: 1
+                Statistics: Num rows: 63 Data size: 7246 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 63 Data size: 7246 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 63 Data size: 7246 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 1
+                          numRows 564
+                          rawDataSize 10503
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 11067
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 1
+                numRows 564
+                rawDataSize 10503
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 11067
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-09
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-09
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+1128
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-09
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-09
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+1128
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0


[26/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/columnstats_part_coltype.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/columnstats_part_coltype.q.out b/ql/src/test/results/clientpositive/llap/columnstats_part_coltype.q.out
new file mode 100644
index 0000000..d52f020
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/columnstats_part_coltype.q.out
@@ -0,0 +1,441 @@
+PREHOOK: query: -- Test type date, int, and string in partition column
+drop table if exists partcolstats
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- Test type date, int, and string in partition column
+drop table if exists partcolstats
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table partcolstats (key int, value string) partitioned by (ds date, hr int, part string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partcolstats
+POSTHOOK: query: create table partcolstats (key int, value string) partitioned by (ds date, hr int, part string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partcolstats
+PREHOOK: query: insert into partcolstats partition (ds=date '2015-04-02', hr=2, part='partA') select key, value from src limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcolstats@ds=2015-04-02/hr=2/part=partA
+POSTHOOK: query: insert into partcolstats partition (ds=date '2015-04-02', hr=2, part='partA') select key, value from src limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcolstats@ds=2015-04-02/hr=2/part=partA
+POSTHOOK: Lineage: partcolstats PARTITION(ds=2015-04-02,hr=2,part=partA).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcolstats PARTITION(ds=2015-04-02,hr=2,part=partA).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into partcolstats partition (ds=date '2015-04-02', hr=2, part='partB') select key, value from src limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcolstats@ds=2015-04-02/hr=2/part=partB
+POSTHOOK: query: insert into partcolstats partition (ds=date '2015-04-02', hr=2, part='partB') select key, value from src limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcolstats@ds=2015-04-02/hr=2/part=partB
+POSTHOOK: Lineage: partcolstats PARTITION(ds=2015-04-02,hr=2,part=partB).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcolstats PARTITION(ds=2015-04-02,hr=2,part=partB).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into partcolstats partition (ds=date '2015-04-02', hr=3, part='partA') select key, value from src limit 30
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcolstats@ds=2015-04-02/hr=3/part=partA
+POSTHOOK: query: insert into partcolstats partition (ds=date '2015-04-02', hr=3, part='partA') select key, value from src limit 30
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcolstats@ds=2015-04-02/hr=3/part=partA
+POSTHOOK: Lineage: partcolstats PARTITION(ds=2015-04-02,hr=3,part=partA).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcolstats PARTITION(ds=2015-04-02,hr=3,part=partA).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into partcolstats partition (ds=date '2015-04-03', hr=3, part='partA') select key, value from src limit 40
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcolstats@ds=2015-04-03/hr=3/part=partA
+POSTHOOK: query: insert into partcolstats partition (ds=date '2015-04-03', hr=3, part='partA') select key, value from src limit 40
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcolstats@ds=2015-04-03/hr=3/part=partA
+POSTHOOK: Lineage: partcolstats PARTITION(ds=2015-04-03,hr=3,part=partA).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcolstats PARTITION(ds=2015-04-03,hr=3,part=partA).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into partcolstats partition (ds=date '2015-04-03', hr=3, part='partB') select key, value from src limit 60
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcolstats@ds=2015-04-03/hr=3/part=partB
+POSTHOOK: query: insert into partcolstats partition (ds=date '2015-04-03', hr=3, part='partB') select key, value from src limit 60
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcolstats@ds=2015-04-03/hr=3/part=partB
+POSTHOOK: Lineage: partcolstats PARTITION(ds=2015-04-03,hr=3,part=partB).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcolstats PARTITION(ds=2015-04-03,hr=3,part=partB).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: analyze table partcolstats partition (ds=date '2015-04-02', hr=2, part='partA') compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcolstats
+PREHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partA
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table partcolstats partition (ds=date '2015-04-02', hr=2, part='partA') compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcolstats
+POSTHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partA
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partA') key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partA') key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+key                 	int                 	27                  	484                 	0                   	18                  	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partA') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partA') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	                    	                    	0                   	18                  	6.8                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partB') key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partB') key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+key                 	int                 	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partB') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partB') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: analyze table partcolstats partition (ds=date '2015-04-02', hr=2, part) compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcolstats
+PREHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partA
+PREHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partB
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table partcolstats partition (ds=date '2015-04-02', hr=2, part) compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcolstats
+POSTHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partA
+POSTHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partB
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partB') key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partB') key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+key                 	int                 	27                  	484                 	0                   	18                  	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partB') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=2, part='partB') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	                    	                    	0                   	18                  	6.8                 	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=3, part='partA') key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=3, part='partA') key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+key                 	int                 	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=3, part='partA') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=3, part='partA') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: analyze table partcolstats partition (ds=date '2015-04-02', hr, part) compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcolstats
+PREHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partA
+PREHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partB
+PREHOOK: Input: default@partcolstats@ds=2015-04-02/hr=3/part=partA
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table partcolstats partition (ds=date '2015-04-02', hr, part) compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcolstats
+POSTHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partA
+POSTHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partB
+POSTHOOK: Input: default@partcolstats@ds=2015-04-02/hr=3/part=partA
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=3, part='partA') key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=3, part='partA') key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+key                 	int                 	27                  	495                 	0                   	28                  	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=3, part='partA') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-02', hr=3, part='partA') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	                    	                    	0                   	18                  	6.833333333333333   	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partA') key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partA') key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+key                 	int                 	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partA') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partA') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partB') key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partB') key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+key                 	int                 	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partB') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partB') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	comment             	 	 	 	 	 	 	 	 
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	from deserializer   	 	 	 	 	 	 	 	 
+PREHOOK: query: analyze table partcolstats partition (ds, hr, part) compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcolstats
+PREHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partA
+PREHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partB
+PREHOOK: Input: default@partcolstats@ds=2015-04-02/hr=3/part=partA
+PREHOOK: Input: default@partcolstats@ds=2015-04-03/hr=3/part=partA
+PREHOOK: Input: default@partcolstats@ds=2015-04-03/hr=3/part=partB
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table partcolstats partition (ds, hr, part) compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcolstats
+POSTHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partA
+POSTHOOK: Input: default@partcolstats@ds=2015-04-02/hr=2/part=partB
+POSTHOOK: Input: default@partcolstats@ds=2015-04-02/hr=3/part=partA
+POSTHOOK: Input: default@partcolstats@ds=2015-04-03/hr=3/part=partA
+POSTHOOK: Input: default@partcolstats@ds=2015-04-03/hr=3/part=partB
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partA') key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partA') key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+key                 	int                 	15                  	495                 	0                   	43                  	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partA') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partA') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	                    	                    	0                   	34                  	6.825               	7                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partB') key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partB') key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+key                 	int                 	15                  	495                 	0                   	51                  	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partB') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstats
+POSTHOOK: query: describe formatted partcolstats partition (ds=date '2015-04-03', hr=3, part='partB') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstats
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	                    	                    	0                   	53                  	6.883333333333334   	7                   	                    	                    	from deserializer   
+PREHOOK: query: drop table partcolstats
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@partcolstats
+PREHOOK: Output: default@partcolstats
+POSTHOOK: query: drop table partcolstats
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@partcolstats
+POSTHOOK: Output: default@partcolstats
+PREHOOK: query: -- Test type tinyint, smallint, and bigint in partition column
+drop table if exists partcolstatsnum
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- Test type tinyint, smallint, and bigint in partition column
+drop table if exists partcolstatsnum
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table partcolstatsnum (key int, value string) partitioned by (tint tinyint, sint smallint, bint bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partcolstatsnum
+POSTHOOK: query: create table partcolstatsnum (key int, value string) partitioned by (tint tinyint, sint smallint, bint bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partcolstatsnum
+PREHOOK: query: insert into partcolstatsnum partition (tint=100, sint=1000, bint=1000000) select key, value from src limit 30
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcolstatsnum@tint=100/sint=1000/bint=1000000
+POSTHOOK: query: insert into partcolstatsnum partition (tint=100, sint=1000, bint=1000000) select key, value from src limit 30
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcolstatsnum@tint=100/sint=1000/bint=1000000
+POSTHOOK: Lineage: partcolstatsnum PARTITION(tint=100,sint=1000,bint=1000000).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcolstatsnum PARTITION(tint=100,sint=1000,bint=1000000).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: analyze table partcolstatsnum partition (tint=100, sint=1000, bint=1000000) compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcolstatsnum
+PREHOOK: Input: default@partcolstatsnum@tint=100/sint=1000/bint=1000000
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table partcolstatsnum partition (tint=100, sint=1000, bint=1000000) compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcolstatsnum
+POSTHOOK: Input: default@partcolstatsnum@tint=100/sint=1000/bint=1000000
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted partcolstatsnum partition (tint=100, sint=1000, bint=1000000) value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstatsnum
+POSTHOOK: query: describe formatted partcolstatsnum partition (tint=100, sint=1000, bint=1000000) value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstatsnum
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	                    	                    	0                   	18                  	6.833333333333333   	7                   	                    	                    	from deserializer   
+PREHOOK: query: drop table partcolstatsnum
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@partcolstatsnum
+PREHOOK: Output: default@partcolstatsnum
+POSTHOOK: query: drop table partcolstatsnum
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@partcolstatsnum
+POSTHOOK: Output: default@partcolstatsnum
+PREHOOK: query: -- Test type decimal in partition column
+drop table if exists partcolstatsdec
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- Test type decimal in partition column
+drop table if exists partcolstatsdec
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table partcolstatsdec (key int, value string) partitioned by (decpart decimal(8,4))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partcolstatsdec
+POSTHOOK: query: create table partcolstatsdec (key int, value string) partitioned by (decpart decimal(8,4))
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partcolstatsdec
+PREHOOK: query: insert into partcolstatsdec partition (decpart='1000.0001') select key, value from src limit 30
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcolstatsdec@decpart=1000.0001
+POSTHOOK: query: insert into partcolstatsdec partition (decpart='1000.0001') select key, value from src limit 30
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcolstatsdec@decpart=1000.0001
+POSTHOOK: Lineage: partcolstatsdec PARTITION(decpart=1000.0001).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcolstatsdec PARTITION(decpart=1000.0001).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: analyze table partcolstatsdec partition (decpart='1000.0001') compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcolstatsdec
+PREHOOK: Input: default@partcolstatsdec@decpart=1000.0001
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table partcolstatsdec partition (decpart='1000.0001') compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcolstatsdec
+POSTHOOK: Input: default@partcolstatsdec@decpart=1000.0001
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted partcolstatsdec partition (decpart='1000.0001') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstatsdec
+POSTHOOK: query: describe formatted partcolstatsdec partition (decpart='1000.0001') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstatsdec
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	                    	                    	0                   	18                  	6.833333333333333   	7                   	                    	                    	from deserializer   
+PREHOOK: query: drop table partcolstatsdec
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@partcolstatsdec
+PREHOOK: Output: default@partcolstatsdec
+POSTHOOK: query: drop table partcolstatsdec
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@partcolstatsdec
+POSTHOOK: Output: default@partcolstatsdec
+PREHOOK: query: -- Test type varchar and char in partition column
+drop table if exists partcolstatschar
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- Test type varchar and char in partition column
+drop table if exists partcolstatschar
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table partcolstatschar (key int, value string) partitioned by (varpart varchar(5), charpart char(3))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@partcolstatschar
+POSTHOOK: query: create table partcolstatschar (key int, value string) partitioned by (varpart varchar(5), charpart char(3))
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@partcolstatschar
+PREHOOK: query: insert into partcolstatschar partition (varpart='part1', charpart='aaa') select key, value from src limit 30
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@partcolstatschar@varpart=part1/charpart=aaa
+POSTHOOK: query: insert into partcolstatschar partition (varpart='part1', charpart='aaa') select key, value from src limit 30
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@partcolstatschar@varpart=part1/charpart=aaa
+POSTHOOK: Lineage: partcolstatschar PARTITION(varpart=part1,charpart=aaa).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: partcolstatschar PARTITION(varpart=part1,charpart=aaa).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: analyze table partcolstatschar partition (varpart='part1', charpart='aaa') compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@partcolstatschar
+PREHOOK: Input: default@partcolstatschar@varpart=part1/charpart=aaa
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table partcolstatschar partition (varpart='part1', charpart='aaa') compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@partcolstatschar
+POSTHOOK: Input: default@partcolstatschar@varpart=part1/charpart=aaa
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted partcolstatschar partition (varpart='part1', charpart='aaa') value
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@partcolstatschar
+POSTHOOK: query: describe formatted partcolstatschar partition (varpart='part1', charpart='aaa') value
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@partcolstatschar
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+value               	string              	                    	                    	0                   	18                  	6.833333333333333   	7                   	                    	                    	from deserializer   
+PREHOOK: query: drop table partcolstatschar
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@partcolstatschar
+PREHOOK: Output: default@partcolstatschar
+POSTHOOK: query: drop table partcolstatschar
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@partcolstatschar
+POSTHOOK: Output: default@partcolstatschar


[23/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/extrapolate_part_stats_partial_ndv.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/extrapolate_part_stats_partial_ndv.q.out b/ql/src/test/results/clientpositive/llap/extrapolate_part_stats_partial_ndv.q.out
new file mode 100644
index 0000000..839c30e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/extrapolate_part_stats_partial_ndv.q.out
@@ -0,0 +1,1301 @@
+PREHOOK: query: drop table if exists ext_loc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists ext_loc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table ext_loc (
+  state string,
+  locid double,
+  cnt decimal,
+  zip int,
+  year string
+) row format delimited fields terminated by '|' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ext_loc
+POSTHOOK: query: create table ext_loc (
+  state string,
+  locid double,
+  cnt decimal,
+  zip int,
+  year string
+) row format delimited fields terminated by '|' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ext_loc
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial_ndv.txt' OVERWRITE INTO TABLE ext_loc
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@ext_loc
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial_ndv.txt' OVERWRITE INTO TABLE ext_loc
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@ext_loc
+PREHOOK: query: drop table if exists loc_orc_1d
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists loc_orc_1d
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table loc_orc_1d (
+  state string,
+  locid double,
+  cnt decimal,
+  zip int
+) partitioned by(year string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@loc_orc_1d
+POSTHOOK: query: create table loc_orc_1d (
+  state string,
+  locid double,
+  cnt decimal,
+  zip int
+) partitioned by(year string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@loc_orc_1d
+PREHOOK: query: insert overwrite table loc_orc_1d partition(year) select * from ext_loc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ext_loc
+PREHOOK: Output: default@loc_orc_1d
+POSTHOOK: query: insert overwrite table loc_orc_1d partition(year) select * from ext_loc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ext_loc
+POSTHOOK: Output: default@loc_orc_1d@year=2000
+POSTHOOK: Output: default@loc_orc_1d@year=2001
+POSTHOOK: Output: default@loc_orc_1d@year=2002
+POSTHOOK: Output: default@loc_orc_1d@year=2003
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2002).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2002).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2002).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2003).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2003).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2003).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ]
+PREHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid,cnt,zip
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_1d
+PREHOOK: Input: default@loc_orc_1d@year=2001
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid,cnt,zip
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_1d
+POSTHOOK: Input: default@loc_orc_1d@year=2001
+#### A masked pattern was here ####
+PREHOOK: query: analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid,cnt,zip
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_1d
+PREHOOK: Input: default@loc_orc_1d@year=2002
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid,cnt,zip
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_1d
+POSTHOOK: Input: default@loc_orc_1d@year=2002
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') state
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') state
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+state               	string              	                    	                    	0                   	3                   	0.75                	2                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2002') state
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2002') state
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+state               	string              	                    	                    	0                   	6                   	3.0                 	3                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') locid
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') locid
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+locid               	double              	1.0                 	4.0                 	0                   	5                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2002') locid
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2002') locid
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+locid               	double              	1.0                 	5.0                 	0                   	6                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') cnt
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') cnt
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+cnt                 	decimal(10,0)       	10                  	2000                	0                   	5                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2002') cnt
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2002') cnt
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+cnt                 	decimal(10,0)       	10                  	910                 	0                   	4                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') zip
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2001') zip
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+zip                 	int                 	43201               	94087               	0                   	4                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2002') zip
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2002') zip
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+zip                 	int                 	43201               	94087               	0                   	4                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: explain extended select state,locid,cnt,zip from loc_orc_1d
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select state,locid,cnt,zip from loc_orc_1d
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2000
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt,zip
+              columns.comments 
+              columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+              name default.loc_orc_1d
+              numFiles 1
+              numRows 2
+              partition_columns year
+              partition_columns.types string
+              rawDataSize 416
+              serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 531
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt,zip
+                columns.comments 
+                columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+                name default.loc_orc_1d
+                partition_columns year
+                partition_columns.types string
+                serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_1d
+            name: default.loc_orc_1d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2001
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"cnt":"true","locid":"true","state":"true","zip":"true"}}
+              bucket_count -1
+              columns state,locid,cnt,zip
+              columns.comments 
+              columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+              name default.loc_orc_1d
+              numFiles 1
+              numRows 4
+              partition_columns year
+              partition_columns.types string
+              rawDataSize 832
+              serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 562
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt,zip
+                columns.comments 
+                columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+                name default.loc_orc_1d
+                partition_columns year
+                partition_columns.types string
+                serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_1d
+            name: default.loc_orc_1d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2002
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"cnt":"true","locid":"true","state":"true","zip":"true"}}
+              bucket_count -1
+              columns state,locid,cnt,zip
+              columns.comments 
+              columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+              name default.loc_orc_1d
+              numFiles 1
+              numRows 6
+              partition_columns year
+              partition_columns.types string
+              rawDataSize 1266
+              serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 580
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt,zip
+                columns.comments 
+                columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+                name default.loc_orc_1d
+                partition_columns year
+                partition_columns.types string
+                serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_1d
+            name: default.loc_orc_1d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2003
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt,zip
+              columns.comments 
+              columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+              name default.loc_orc_1d
+              numFiles 1
+              numRows 8
+              partition_columns year
+              partition_columns.types string
+              rawDataSize 1672
+              serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 602
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt,zip
+                columns.comments 
+                columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+                name default.loc_orc_1d
+                partition_columns year
+                partition_columns.types string
+                serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_1d
+            name: default.loc_orc_1d
+      Processor Tree:
+        TableScan
+          alias: loc_orc_1d
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string), locid (type: double), cnt (type: decimal(10,0)), zip (type: int)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            ListSink
+
+PREHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state,locid,cnt,zip
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_1d
+PREHOOK: Input: default@loc_orc_1d@year=2000
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state,locid,cnt,zip
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_1d
+POSTHOOK: Input: default@loc_orc_1d@year=2000
+#### A masked pattern was here ####
+PREHOOK: query: analyze table loc_orc_1d partition(year='2003') compute statistics for columns state,locid,cnt,zip
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_1d
+PREHOOK: Input: default@loc_orc_1d@year=2003
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table loc_orc_1d partition(year='2003') compute statistics for columns state,locid,cnt,zip
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_1d
+POSTHOOK: Input: default@loc_orc_1d@year=2003
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2000') state
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2000') state
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+state               	string              	                    	                    	0                   	2                   	0.5                 	1                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2003') state
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2003') state
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+state               	string              	                    	                    	0                   	4                   	1.25                	4                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2000') locid
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2000') locid
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+locid               	double              	1.0                 	2.0                 	0                   	2                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2003') locid
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2003') locid
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+locid               	double              	1.0                 	31.0                	0                   	6                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2000') cnt
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2000') cnt
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+cnt                 	decimal(10,0)       	1000                	1010                	0                   	3                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2003') cnt
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2003') cnt
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+cnt                 	decimal(10,0)       	1000                	2000                	0                   	3                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2000') zip
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2000') zip
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+zip                 	int                 	94086               	94087               	0                   	2                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_1d PARTITION(year='2003') zip
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_1d
+POSTHOOK: query: describe formatted loc_orc_1d PARTITION(year='2003') zip
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_1d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+zip                 	int                 	43201               	94087               	0                   	4                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: explain extended select state,locid,cnt,zip from loc_orc_1d
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select state,locid,cnt,zip from loc_orc_1d
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2000
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"cnt":"true","locid":"true","state":"true","zip":"true"}}
+              bucket_count -1
+              columns state,locid,cnt,zip
+              columns.comments 
+              columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+              name default.loc_orc_1d
+              numFiles 1
+              numRows 2
+              partition_columns year
+              partition_columns.types string
+              rawDataSize 416
+              serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 531
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt,zip
+                columns.comments 
+                columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+                name default.loc_orc_1d
+                partition_columns year
+                partition_columns.types string
+                serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_1d
+            name: default.loc_orc_1d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2001
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"cnt":"true","locid":"true","state":"true","zip":"true"}}
+              bucket_count -1
+              columns state,locid,cnt,zip
+              columns.comments 
+              columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+              name default.loc_orc_1d
+              numFiles 1
+              numRows 4
+              partition_columns year
+              partition_columns.types string
+              rawDataSize 832
+              serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 562
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt,zip
+                columns.comments 
+                columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+                name default.loc_orc_1d
+                partition_columns year
+                partition_columns.types string
+                serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_1d
+            name: default.loc_orc_1d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2002
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"cnt":"true","locid":"true","state":"true","zip":"true"}}
+              bucket_count -1
+              columns state,locid,cnt,zip
+              columns.comments 
+              columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+              name default.loc_orc_1d
+              numFiles 1
+              numRows 6
+              partition_columns year
+              partition_columns.types string
+              rawDataSize 1266
+              serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 580
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt,zip
+                columns.comments 
+                columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+                name default.loc_orc_1d
+                partition_columns year
+                partition_columns.types string
+                serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_1d
+            name: default.loc_orc_1d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2003
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"cnt":"true","locid":"true","state":"true","zip":"true"}}
+              bucket_count -1
+              columns state,locid,cnt,zip
+              columns.comments 
+              columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+              name default.loc_orc_1d
+              numFiles 1
+              numRows 8
+              partition_columns year
+              partition_columns.types string
+              rawDataSize 1672
+              serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 602
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt,zip
+                columns.comments 
+                columns.types string:double:decimal(10,0):int
+#### A masked pattern was here ####
+                name default.loc_orc_1d
+                partition_columns year
+                partition_columns.types string
+                serialization.ddl struct loc_orc_1d { string state, double locid, decimal(10,0) cnt, i32 zip}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_1d
+            name: default.loc_orc_1d
+      Processor Tree:
+        TableScan
+          alias: loc_orc_1d
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string), locid (type: double), cnt (type: decimal(10,0)), zip (type: int)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            ListSink
+
+PREHOOK: query: drop table if exists loc_orc_2d
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists loc_orc_2d
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table loc_orc_2d (
+  state string,
+  locid int,
+  cnt decimal
+) partitioned by(zip int, year string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@loc_orc_2d
+POSTHOOK: query: create table loc_orc_2d (
+  state string,
+  locid int,
+  cnt decimal
+) partitioned by(zip int, year string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@loc_orc_2d
+PREHOOK: query: insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ext_loc
+PREHOOK: Output: default@loc_orc_2d
+POSTHOOK: query: insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ext_loc
+POSTHOOK: Output: default@loc_orc_2d@zip=43201/year=2001
+POSTHOOK: Output: default@loc_orc_2d@zip=43201/year=2002
+POSTHOOK: Output: default@loc_orc_2d@zip=43201/year=2003
+POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2000
+POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2001
+POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2002
+POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2003
+POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2000
+POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2001
+POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2002
+POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2003
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2001).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2001).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2002).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2002).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2003).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2003).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2002).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2002).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2003).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2003).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2002).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2002).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2003).cnt SIMPLE [(ext_loc)ext_loc.FieldSchema(name:cnt, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2003).locid EXPRESSION [(ext_loc)ext_loc.FieldSchema(name:locid, type:double, comment:null), ]
+POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ]
+PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid,cnt
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_2d
+PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2001
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid,cnt
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_2d
+POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2001
+#### A masked pattern was here ####
+PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2002') compute statistics for columns state,locid,cnt
+PREHOOK: type: QUERY
+PREHOOK: Input: default@loc_orc_2d
+PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2002
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2002') compute statistics for columns state,locid,cnt
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@loc_orc_2d
+POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2002
+#### A masked pattern was here ####
+PREHOOK: query: describe formatted loc_orc_2d partition(zip=94086, year='2001') state
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_2d
+POSTHOOK: query: describe formatted loc_orc_2d partition(zip=94086, year='2001') state
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_2d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+state               	string              	                    	                    	0                   	2                   	0.5                 	1                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_2d partition(zip=94087, year='2002') state
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_2d
+POSTHOOK: query: describe formatted loc_orc_2d partition(zip=94087, year='2002') state
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_2d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+state               	string              	                    	                    	0                   	4                   	3.0                 	3                   	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_2d partition(zip=94086, year='2001') locid
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_2d
+POSTHOOK: query: describe formatted loc_orc_2d partition(zip=94086, year='2001') locid
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_2d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+locid               	int                 	2                   	3                   	0                   	2                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_2d partition(zip=94087, year='2002') locid
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_2d
+POSTHOOK: query: describe formatted loc_orc_2d partition(zip=94087, year='2002') locid
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_2d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+locid               	int                 	1                   	5                   	0                   	3                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_2d partition(zip=94086, year='2001') cnt
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_2d
+POSTHOOK: query: describe formatted loc_orc_2d partition(zip=94086, year='2001') cnt
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_2d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+cnt                 	decimal(10,0)       	1000                	2000                	0                   	2                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: describe formatted loc_orc_2d partition(zip=94087, year='2002') cnt
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@loc_orc_2d
+POSTHOOK: query: describe formatted loc_orc_2d partition(zip=94087, year='2002') cnt
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@loc_orc_2d
+# col_name            	data_type           	min                 	max                 	num_nulls           	distinct_count      	avg_col_len         	max_col_len         	num_trues           	num_falses          	comment             
+	 	 	 	 	 	 	 	 	 	 
+cnt                 	decimal(10,0)       	10                  	100                 	0                   	2                   	                    	                    	                    	                    	from deserializer   
+PREHOOK: query: explain extended select state,locid,cnt,zip from loc_orc_2d
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select state,locid,cnt,zip from loc_orc_2d
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2001
+              zip 43201
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 1
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 202
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 386
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2002
+              zip 43201
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 2
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 406
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 409
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2003
+              zip 43201
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 3
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 603
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 423
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2000
+              zip 94086
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 1
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 201
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 383
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2001
+              zip 94086
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"cnt":"true","locid":"true","state":"true"}}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 2
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 400
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 394
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2002
+              zip 94086
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 1
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 203
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 387
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2003
+              zip 94086
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 2
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 404
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 409
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2000
+              zip 94087
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 1
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 200
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 366
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2001
+              zip 94087
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 1
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 200
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 361
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2002
+              zip 94087
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"cnt":"true","locid":"true","state":"true"}}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 3
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 609
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 412
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+            output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+            partition values:
+              year 2003
+              zip 94087
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns state,locid,cnt
+              columns.comments 
+              columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+              name default.loc_orc_2d
+              numFiles 1
+              numRows 3
+              partition_columns zip/year
+              partition_columns.types int:string
+              rawDataSize 600
+              serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              totalSize 412
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+          
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              properties:
+                bucket_count -1
+                columns state,locid,cnt
+                columns.comments 
+                columns.types string:int:decimal(10,0)
+#### A masked pattern was here ####
+                name default.loc_orc_2d
+                partition_columns zip/year
+                partition_columns.types int:string
+                serialization.ddl struct loc_orc_2d { string state, i32 locid, decimal(10,0) cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.loc_orc_2d
+            name: default.loc_orc_2d
+      Processor Tree:
+        TableScan
+          alias: loc_orc_2d
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string), locid (type: int), cnt (type: decimal(10,0)), zip (type: int)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/groupby_grouping_id2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/groupby_grouping_id2.q.out b/ql/src/test/results/clientpositive/llap/groupby_grouping_id2.q.out
new file mode 100644
index 0000000..544a7ae
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/groupby_grouping_id2.q.out
@@ -0,0 +1,234 @@
+PREHOOK: query: CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T1
+POSTHOOK: query: CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@t1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@t1
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	1	3	1
+1	NULL	1	2
+1	NULL	3	1
+2	2	3	1
+2	NULL	1	1
+3	3	3	1
+3	NULL	1	2
+3	NULL	3	1
+4	5	3	1
+4	NULL	1	1
+NULL	NULL	0	6
+PREHOOK: query: SELECT GROUPING__ID, count(*)
+FROM
+(
+SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP
+) t 
+GROUP BY GROUPING__ID
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT GROUPING__ID, count(*)
+FROM
+(
+SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP
+) t 
+GROUP BY GROUPING__ID
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+0	1
+1	4
+3	6
+PREHOOK: query: SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1  GROUP BY key,value WITH ROLLUP) t1
+JOIN 
+(SELECT GROUPING__ID FROM T1 GROUP BY key, value WITH ROLLUP) t2
+ON t1.GROUPING__ID = t2.GROUPING__ID
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1  GROUP BY key,value WITH ROLLUP) t1
+JOIN 
+(SELECT GROUPING__ID FROM T1 GROUP BY key, value WITH ROLLUP) t2
+ON t1.GROUPING__ID = t2.GROUPING__ID
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+0	0
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+PREHOOK: query: SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1	1	3	1
+1	NULL	1	2
+1	NULL	3	1
+2	2	3	1
+2	NULL	1	1
+3	3	3	1
+3	NULL	1	2
+3	NULL	3	1
+4	5	3	1
+4	NULL	1	1
+NULL	NULL	0	6
+PREHOOK: query: SELECT GROUPING__ID, count(*)
+FROM
+(
+SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP
+) t 
+GROUP BY GROUPING__ID
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT GROUPING__ID, count(*)
+FROM
+(
+SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP
+) t 
+GROUP BY GROUPING__ID
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+0	1
+1	4
+3	6
+PREHOOK: query: SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1  GROUP BY key,value WITH ROLLUP) t1
+JOIN 
+(SELECT GROUPING__ID FROM T1 GROUP BY key, value WITH ROLLUP) t2
+ON t1.GROUPING__ID = t2.GROUPING__ID
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t1.GROUPING__ID, t2.GROUPING__ID FROM (SELECT GROUPING__ID FROM T1  GROUP BY key,value WITH ROLLUP) t1
+JOIN 
+(SELECT GROUPING__ID FROM T1 GROUP BY key, value WITH ROLLUP) t2
+ON t1.GROUPING__ID = t2.GROUPING__ID
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+0	0
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+1	1
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3
+3	3


[03/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/smb_mapjoin_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_6.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_6.q.out
new file mode 100644
index 0000000..bbf9b46
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_6.q.out
@@ -0,0 +1,2755 @@
+PREHOOK: query: CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket4_1
+POSTHOOK: query: CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket4_1
+PREHOOK: query: CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_bucket4_2
+POSTHOOK: query: CREATE TABLE smb_bucket4_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_bucket4_2
+PREHOOK: query: create table smb_join_results(k1 int, v1 string, k2 int, v2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_join_results
+POSTHOOK: query: create table smb_join_results(k1 int, v1 string, k2 int, v2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_join_results
+PREHOOK: query: create table normal_join_results(k1 int, v1 string, k2 int, v2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@normal_join_results
+POSTHOOK: query: create table normal_join_results(k1 int, v1 string, k2 int, v2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@normal_join_results
+PREHOOK: query: insert overwrite table smb_bucket4_1
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@smb_bucket4_1
+POSTHOOK: query: insert overwrite table smb_bucket4_1
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@smb_bucket4_1
+POSTHOOK: Lineage: smb_bucket4_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table smb_bucket4_2
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@smb_bucket4_2
+POSTHOOK: query: insert overwrite table smb_bucket4_2
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@smb_bucket4_2
+POSTHOOK: Lineage: smb_bucket4_2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Statistics: Num rows: 550 Data size: 5293 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 550 Data size: 5293 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 550 Data size: 5293 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.smb_join_results
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.smb_join_results
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket4_1
+PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Output: default@smb_join_results
+POSTHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket4_1
+POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Output: default@smb_join_results
+POSTHOOK: Lineage: smb_join_results.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from smb_join_results order by k1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_join_results
+#### A masked pattern was here ####
+POSTHOOK: query: select * from smb_join_results order by k1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_join_results
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+2	val_2	2	val_2
+4	val_4	4	val_4
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+8	val_8	8	val_8
+9	val_9	9	val_9
+10	val_10	10	val_10
+11	val_11	11	val_11
+12	val_12	12	val_12
+12	val_12	12	val_12
+12	val_12	12	val_12
+12	val_12	12	val_12
+15	val_15	15	val_15
+15	val_15	15	val_15
+15	val_15	15	val_15
+15	val_15	15	val_15
+17	val_17	17	val_17
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+19	val_19	19	val_19
+20	val_20	20	val_20
+24	val_24	24	val_24
+24	val_24	24	val_24
+24	val_24	24	val_24
+24	val_24	24	val_24
+26	val_26	26	val_26
+26	val_26	26	val_26
+26	val_26	26	val_26
+26	val_26	26	val_26
+27	val_27	27	val_27
+28	val_28	28	val_28
+30	val_30	30	val_30
+33	val_33	33	val_33
+34	val_34	34	val_34
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+37	val_37	37	val_37
+37	val_37	37	val_37
+37	val_37	37	val_37
+37	val_37	37	val_37
+41	val_41	41	val_41
+42	val_42	42	val_42
+42	val_42	42	val_42
+42	val_42	42	val_42
+42	val_42	42	val_42
+43	val_43	43	val_43
+44	val_44	44	val_44
+47	val_47	47	val_47
+51	val_51	51	val_51
+51	val_51	51	val_51
+51	val_51	51	val_51
+51	val_51	51	val_51
+53	val_53	53	val_53
+54	val_54	54	val_54
+57	val_57	57	val_57
+58	val_58	58	val_58
+58	val_58	58	val_58
+58	val_58	58	val_58
+58	val_58	58	val_58
+64	val_64	64	val_64
+65	val_65	65	val_65
+66	val_66	66	val_66
+67	val_67	67	val_67
+67	val_67	67	val_67
+67	val_67	67	val_67
+67	val_67	67	val_67
+69	val_69	69	val_69
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+72	val_72	72	val_72
+72	val_72	72	val_72
+72	val_72	72	val_72
+72	val_72	72	val_72
+74	val_74	74	val_74
+76	val_76	76	val_76
+76	val_76	76	val_76
+76	val_76	76	val_76
+76	val_76	76	val_76
+77	val_77	77	val_77
+78	val_78	78	val_78
+80	val_80	80	val_80
+82	val_82	82	val_82
+83	val_83	83	val_83
+83	val_83	83	val_83
+83	val_83	83	val_83
+83	val_83	83	val_83
+84	val_84	84	val_84
+84	val_84	84	val_84
+84	val_84	84	val_84
+84	val_84	84	val_84
+85	val_85	85	val_85
+86	val_86	86	val_86
+87	val_87	87	val_87
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+92	val_92	92	val_92
+95	val_95	95	val_95
+95	val_95	95	val_95
+95	val_95	95	val_95
+95	val_95	95	val_95
+96	val_96	96	val_96
+97	val_97	97	val_97
+97	val_97	97	val_97
+97	val_97	97	val_97
+97	val_97	97	val_97
+98	val_98	98	val_98
+98	val_98	98	val_98
+98	val_98	98	val_98
+98	val_98	98	val_98
+100	val_100	100	val_100
+100	val_100	100	val_100
+100	val_100	100	val_100
+100	val_100	100	val_100
+103	val_103	103	val_103
+103	val_103	103	val_103
+103	val_103	103	val_103
+103	val_103	103	val_103
+104	val_104	104	val_104
+104	val_104	104	val_104
+104	val_104	104	val_104
+104	val_104	104	val_104
+105	val_105	105	val_105
+111	val_111	111	val_111
+113	val_113	113	val_113
+113	val_113	113	val_113
+113	val_113	113	val_113
+113	val_113	113	val_113
+114	val_114	114	val_114
+116	val_116	116	val_116
+118	val_118	118	val_118
+118	val_118	118	val_118
+118	val_118	118	val_118
+118	val_118	118	val_118
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+120	val_120	120	val_120
+120	val_120	120	val_120
+120	val_120	120	val_120
+120	val_120	120	val_120
+125	val_125	125	val_125
+125	val_125	125	val_125
+125	val_125	125	val_125
+125	val_125	125	val_125
+126	val_126	126	val_126
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+129	val_129	129	val_129
+129	val_129	129	val_129
+129	val_129	129	val_129
+129	val_129	129	val_129
+131	val_131	131	val_131
+133	val_133	133	val_133
+134	val_134	134	val_134
+134	val_134	134	val_134
+134	val_134	134	val_134
+134	val_134	134	val_134
+136	val_136	136	val_136
+137	val_137	137	val_137
+137	val_137	137	val_137
+137	val_137	137	val_137
+137	val_137	137	val_137
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+143	val_143	143	val_143
+145	val_145	145	val_145
+146	val_146	146	val_146
+146	val_146	146	val_146
+146	val_146	146	val_146
+146	val_146	146	val_146
+149	val_149	149	val_149
+149	val_149	149	val_149
+149	val_149	149	val_149
+149	val_149	149	val_149
+150	val_150	150	val_150
+152	val_152	152	val_152
+152	val_152	152	val_152
+152	val_152	152	val_152
+152	val_152	152	val_152
+153	val_153	153	val_153
+155	val_155	155	val_155
+156	val_156	156	val_156
+157	val_157	157	val_157
+158	val_158	158	val_158
+160	val_160	160	val_160
+162	val_162	162	val_162
+163	val_163	163	val_163
+164	val_164	164	val_164
+164	val_164	164	val_164
+164	val_164	164	val_164
+164	val_164	164	val_164
+165	val_165	165	val_165
+165	val_165	165	val_165
+165	val_165	165	val_165
+165	val_165	165	val_165
+166	val_166	166	val_166
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+168	val_168	168	val_168
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+170	val_170	170	val_170
+172	val_172	172	val_172
+172	val_172	172	val_172
+172	val_172	172	val_172
+172	val_172	172	val_172
+174	val_174	174	val_174
+174	val_174	174	val_174
+174	val_174	174	val_174
+174	val_174	174	val_174
+175	val_175	175	val_175
+175	val_175	175	val_175
+175	val_175	175	val_175
+175	val_175	175	val_175
+176	val_176	176	val_176
+176	val_176	176	val_176
+176	val_176	176	val_176
+176	val_176	176	val_176
+177	val_177	177	val_177
+178	val_178	178	val_178
+179	val_179	179	val_179
+179	val_179	179	val_179
+179	val_179	179	val_179
+179	val_179	179	val_179
+180	val_180	180	val_180
+181	val_181	181	val_181
+183	val_183	183	val_183
+186	val_186	186	val_186
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+189	val_189	189	val_189
+190	val_190	190	val_190
+191	val_191	191	val_191
+191	val_191	191	val_191
+191	val_191	191	val_191
+191	val_191	191	val_191
+192	val_192	192	val_192
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+194	val_194	194	val_194
+195	val_195	195	val_195
+195	val_195	195	val_195
+195	val_195	195	val_195
+195	val_195	195	val_195
+196	val_196	196	val_196
+197	val_197	197	val_197
+197	val_197	197	val_197
+197	val_197	197	val_197
+197	val_197	197	val_197
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+200	val_200	200	val_200
+200	val_200	200	val_200
+200	val_200	200	val_200
+200	val_200	200	val_200
+201	val_201	201	val_201
+202	val_202	202	val_202
+203	val_203	203	val_203
+203	val_203	203	val_203
+203	val_203	203	val_203
+203	val_203	203	val_203
+205	val_205	205	val_205
+205	val_205	205	val_205
+205	val_205	205	val_205
+205	val_205	205	val_205
+207	val_207	207	val_207
+207	val_207	207	val_207
+207	val_207	207	val_207
+207	val_207	207	val_207
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+209	val_209	209	val_209
+209	val_209	209	val_209
+209	val_209	209	val_209
+209	val_209	209	val_209
+213	val_213	213	val_213
+213	val_213	213	val_213
+213	val_213	213	val_213
+213	val_213	213	val_213
+214	val_214	214	val_214
+216	val_216	216	val_216
+216	val_216	216	val_216
+216	val_216	216	val_216
+216	val_216	216	val_216
+217	val_217	217	val_217
+217	val_217	217	val_217
+217	val_217	217	val_217
+217	val_217	217	val_217
+218	val_218	218	val_218
+219	val_219	219	val_219
+219	val_219	219	val_219
+219	val_219	219	val_219
+219	val_219	219	val_219
+221	val_221	221	val_221
+221	val_221	221	val_221
+221	val_221	221	val_221
+221	val_221	221	val_221
+222	val_222	222	val_222
+223	val_223	223	val_223
+223	val_223	223	val_223
+223	val_223	223	val_223
+223	val_223	223	val_223
+224	val_224	224	val_224
+224	val_224	224	val_224
+224	val_224	224	val_224
+224	val_224	224	val_224
+226	val_226	226	val_226
+228	val_228	228	val_228
+229	val_229	229	val_229
+229	val_229	229	val_229
+229	val_229	229	val_229
+229	val_229	229	val_229
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+233	val_233	233	val_233
+233	val_233	233	val_233
+233	val_233	233	val_233
+233	val_233	233	val_233
+235	val_235	235	val_235
+237	val_237	237	val_237
+237	val_237	237	val_237
+237	val_237	237	val_237
+237	val_237	237	val_237
+238	val_238	238	val_238
+238	val_238	238	val_238
+238	val_238	238	val_238
+238	val_238	238	val_238
+239	val_239	239	val_239
+239	val_239	239	val_239
+239	val_239	239	val_239
+239	val_239	239	val_239
+241	val_241	241	val_241
+242	val_242	242	val_242
+242	val_242	242	val_242
+242	val_242	242	val_242
+242	val_242	242	val_242
+244	val_244	244	val_244
+247	val_247	247	val_247
+248	val_248	248	val_248
+249	val_249	249	val_249
+252	val_252	252	val_252
+255	val_255	255	val_255
+255	val_255	255	val_255
+255	val_255	255	val_255
+255	val_255	255	val_255
+256	val_256	256	val_256
+256	val_256	256	val_256
+256	val_256	256	val_256
+256	val_256	256	val_256
+257	val_257	257	val_257
+258	val_258	258	val_258
+260	val_260	260	val_260
+262	val_262	262	val_262
+263	val_263	263	val_263
+265	val_265	265	val_265
+265	val_265	265	val_265
+265	val_265	265	val_265
+265	val_265	265	val_265
+266	val_266	266	val_266
+272	val_272	272	val_272
+272	val_272	272	val_272
+272	val_272	272	val_272
+272	val_272	272	val_272
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+274	val_274	274	val_274
+275	val_275	275	val_275
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+278	val_278	278	val_278
+278	val_278	278	val_278
+278	val_278	278	val_278
+278	val_278	278	val_278
+280	val_280	280	val_280
+280	val_280	280	val_280
+280	val_280	280	val_280
+280	val_280	280	val_280
+281	val_281	281	val_281
+281	val_281	281	val_281
+281	val_281	281	val_281
+281	val_281	281	val_281
+282	val_282	282	val_282
+282	val_282	282	val_282
+282	val_282	282	val_282
+282	val_282	282	val_282
+283	val_283	283	val_283
+284	val_284	284	val_284
+285	val_285	285	val_285
+286	val_286	286	val_286
+287	val_287	287	val_287
+288	val_288	288	val_288
+288	val_288	288	val_288
+288	val_288	288	val_288
+288	val_288	288	val_288
+289	val_289	289	val_289
+291	val_291	291	val_291
+292	val_292	292	val_292
+296	val_296	296	val_296
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+302	val_302	302	val_302
+305	val_305	305	val_305
+306	val_306	306	val_306
+307	val_307	307	val_307
+307	val_307	307	val_307
+307	val_307	307	val_307
+307	val_307	307	val_307
+308	val_308	308	val_308
+309	val_309	309	val_309
+309	val_309	309	val_309
+309	val_309	309	val_309
+309	val_309	309	val_309
+310	val_310	310	val_310
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+315	val_315	315	val_315
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+317	val_317	317	val_317
+317	val_317	317	val_317
+317	val_317	317	val_317
+317	val_317	317	val_317
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+321	val_321	321	val_321
+321	val_321	321	val_321
+321	val_321	321	val_321
+321	val_321	321	val_321
+322	val_322	322	val_322
+322	val_322	322	val_322
+322	val_322	322	val_322
+322	val_322	322	val_322
+323	val_323	323	val_323
+325	val_325	325	val_325
+325	val_325	325	val_325
+325	val_325	325	val_325
+325	val_325	325	val_325
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+331	val_331	331	val_331
+331	val_331	331	val_331
+331	val_331	331	val_331
+331	val_331	331	val_331
+332	val_332	332	val_332
+333	val_333	333	val_333
+333	val_333	333	val_333
+333	val_333	333	val_333
+333	val_333	333	val_333
+335	val_335	335	val_335
+336	val_336	336	val_336
+338	val_338	338	val_338
+339	val_339	339	val_339
+341	val_341	341	val_341
+342	val_342	342	val_342
+342	val_342	342	val_342
+342	val_342	342	val_342
+342	val_342	342	val_342
+344	val_344	344	val_344
+344	val_344	344	val_344
+344	val_344	344	val_344
+344	val_344	344	val_344
+345	val_345	345	val_345
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+351	val_351	351	val_351
+353	val_353	353	val_353
+353	val_353	353	val_353
+353	val_353	353	val_353
+353	val_353	353	val_353
+356	val_356	356	val_356
+360	val_360	360	val_360
+362	val_362	362	val_362
+364	val_364	364	val_364
+365	val_365	365	val_365
+366	val_366	366	val_366
+367	val_367	367	val_367
+367	val_367	367	val_367
+367	val_367	367	val_367
+367	val_367	367	val_367
+368	val_368	368	val_368
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+373	val_373	373	val_373
+374	val_374	374	val_374
+375	val_375	375	val_375
+377	val_377	377	val_377
+378	val_378	378	val_378
+379	val_379	379	val_379
+382	val_382	382	val_382
+382	val_382	382	val_382
+382	val_382	382	val_382
+382	val_382	382	val_382
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+386	val_386	386	val_386
+389	val_389	389	val_389
+392	val_392	392	val_392
+393	val_393	393	val_393
+394	val_394	394	val_394
+395	val_395	395	val_395
+395	val_395	395	val_395
+395	val_395	395	val_395
+395	val_395	395	val_395
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+397	val_397	397	val_397
+397	val_397	397	val_397
+397	val_397	397	val_397
+397	val_397	397	val_397
+399	val_399	399	val_399
+399	val_399	399	val_399
+399	val_399	399	val_399
+399	val_399	399	val_399
+400	val_400	400	val_400
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+402	val_402	402	val_402
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+404	val_404	404	val_404
+404	val_404	404	val_404
+404	val_404	404	val_404
+404	val_404	404	val_404
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+407	val_407	407	val_407
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+411	val_411	411	val_411
+413	val_413	413	val_413
+413	val_413	413	val_413
+413	val_413	413	val_413
+413	val_413	413	val_413
+414	val_414	414	val_414
+414	val_414	414	val_414
+414	val_414	414	val_414
+414	val_414	414	val_414
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+418	val_418	418	val_418
+419	val_419	419	val_419
+421	val_421	421	val_421
+424	val_424	424	val_424
+424	val_424	424	val_424
+424	val_424	424	val_424
+424	val_424	424	val_424
+427	val_427	427	val_427
+429	val_429	429	val_429
+429	val_429	429	val_429
+429	val_429	429	val_429
+429	val_429	429	val_429
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+432	val_432	432	val_432
+435	val_435	435	val_435
+436	val_436	436	val_436
+437	val_437	437	val_437
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+439	val_439	439	val_439
+439	val_439	439	val_439
+439	val_439	439	val_439
+439	val_439	439	val_439
+443	val_443	443	val_443
+444	val_444	444	val_444
+446	val_446	446	val_446
+448	val_448	448	val_448
+449	val_449	449	val_449
+452	val_452	452	val_452
+453	val_453	453	val_453
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+455	val_455	455	val_455
+457	val_457	457	val_457
+458	val_458	458	val_458
+458	val_458	458	val_458
+458	val_458	458	val_458
+458	val_458	458	val_458
+459	val_459	459	val_459
+459	val_459	459	val_459
+459	val_459	459	val_459
+459	val_459	459	val_459
+460	val_460	460	val_460
+462	val_462	462	val_462
+462	val_462	462	val_462
+462	val_462	462	val_462
+462	val_462	462	val_462
+463	val_463	463	val_463
+463	val_463	463	val_463
+463	val_463	463	val_463
+463	val_463	463	val_463
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+467	val_467	467	val_467
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+470	val_470	470	val_470
+472	val_472	472	val_472
+475	val_475	475	val_475
+477	val_477	477	val_477
+478	val_478	478	val_478
+478	val_478	478	val_478
+478	val_478	478	val_478
+478	val_478	478	val_478
+479	val_479	479	val_479
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+481	val_481	481	val_481
+482	val_482	482	val_482
+483	val_483	483	val_483
+484	val_484	484	val_484
+485	val_485	485	val_485
+487	val_487	487	val_487
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+490	val_490	490	val_490
+491	val_491	491	val_491
+492	val_492	492	val_492
+492	val_492	492	val_492
+492	val_492	492	val_492
+492	val_492	492	val_492
+493	val_493	493	val_493
+494	val_494	494	val_494
+495	val_495	495	val_495
+496	val_496	496	val_496
+497	val_497	497	val_497
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+PREHOOK: query: insert overwrite table normal_join_results select * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket4_1
+PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Output: default@normal_join_results
+POSTHOOK: query: insert overwrite table normal_join_results select * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket4_1
+POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Output: default@normal_join_results
+POSTHOOK: Lineage: normal_join_results.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: normal_join_results.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: normal_join_results.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: normal_join_results.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results
+PREHOOK: type: QUERY
+PREHOOK: Input: default@normal_join_results
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@normal_join_results
+#### A masked pattern was here ####
+278697	278697	101852390308	101852390308
+PREHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_join_results
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_join_results
+#### A masked pattern was here ####
+278697	278697	101852390308	101852390308
+PREHOOK: query: explain
+insert overwrite table smb_join_results
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table smb_join_results
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Statistics: Num rows: 550 Data size: 5293 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 550 Data size: 5293 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 550 Data size: 5293 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.smb_join_results
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.smb_join_results
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket4_1
+PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Output: default@smb_join_results
+POSTHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket4_1
+POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Output: default@smb_join_results
+POSTHOOK: Lineage: smb_join_results.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket4_1
+PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Output: default@smb_join_results
+POSTHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket4_1
+POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Output: default@smb_join_results
+POSTHOOK: Lineage: smb_join_results.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from smb_join_results order by k1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_join_results
+#### A masked pattern was here ####
+POSTHOOK: query: select * from smb_join_results order by k1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_join_results
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+2	val_2	2	val_2
+4	val_4	4	val_4
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+8	val_8	8	val_8
+9	val_9	9	val_9
+10	val_10	10	val_10
+11	val_11	11	val_11
+12	val_12	12	val_12
+12	val_12	12	val_12
+12	val_12	12	val_12
+12	val_12	12	val_12
+15	val_15	15	val_15
+15	val_15	15	val_15
+15	val_15	15	val_15
+15	val_15	15	val_15
+17	val_17	17	val_17
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+19	val_19	19	val_19
+20	val_20	20	val_20
+24	val_24	24	val_24
+24	val_24	24	val_24
+24	val_24	24	val_24
+24	val_24	24	val_24
+26	val_26	26	val_26
+26	val_26	26	val_26
+26	val_26	26	val_26
+26	val_26	26	val_26
+27	val_27	27	val_27
+28	val_28	28	val_28
+30	val_30	30	val_30
+33	val_33	33	val_33
+34	val_34	34	val_34
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+35	val_35	35	val_35
+37	val_37	37	val_37
+37	val_37	37	val_37
+37	val_37	37	val_37
+37	val_37	37	val_37
+41	val_41	41	val_41
+42	val_42	42	val_42
+42	val_42	42	val_42
+42	val_42	42	val_42
+42	val_42	42	val_42
+43	val_43	43	val_43
+44	val_44	44	val_44
+47	val_47	47	val_47
+51	val_51	51	val_51
+51	val_51	51	val_51
+51	val_51	51	val_51
+51	val_51	51	val_51
+53	val_53	53	val_53
+54	val_54	54	val_54
+57	val_57	57	val_57
+58	val_58	58	val_58
+58	val_58	58	val_58
+58	val_58	58	val_58
+58	val_58	58	val_58
+64	val_64	64	val_64
+65	val_65	65	val_65
+66	val_66	66	val_66
+67	val_67	67	val_67
+67	val_67	67	val_67
+67	val_67	67	val_67
+67	val_67	67	val_67
+69	val_69	69	val_69
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+70	val_70	70	val_70
+72	val_72	72	val_72
+72	val_72	72	val_72
+72	val_72	72	val_72
+72	val_72	72	val_72
+74	val_74	74	val_74
+76	val_76	76	val_76
+76	val_76	76	val_76
+76	val_76	76	val_76
+76	val_76	76	val_76
+77	val_77	77	val_77
+78	val_78	78	val_78
+80	val_80	80	val_80
+82	val_82	82	val_82
+83	val_83	83	val_83
+83	val_83	83	val_83
+83	val_83	83	val_83
+83	val_83	83	val_83
+84	val_84	84	val_84
+84	val_84	84	val_84
+84	val_84	84	val_84
+84	val_84	84	val_84
+85	val_85	85	val_85
+86	val_86	86	val_86
+87	val_87	87	val_87
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+90	val_90	90	val_90
+92	val_92	92	val_92
+95	val_95	95	val_95
+95	val_95	95	val_95
+95	val_95	95	val_95
+95	val_95	95	val_95
+96	val_96	96	val_96
+97	val_97	97	val_97
+97	val_97	97	val_97
+97	val_97	97	val_97
+97	val_97	97	val_97
+98	val_98	98	val_98
+98	val_98	98	val_98
+98	val_98	98	val_98
+98	val_98	98	val_98
+100	val_100	100	val_100
+100	val_100	100	val_100
+100	val_100	100	val_100
+100	val_100	100	val_100
+103	val_103	103	val_103
+103	val_103	103	val_103
+103	val_103	103	val_103
+103	val_103	103	val_103
+104	val_104	104	val_104
+104	val_104	104	val_104
+104	val_104	104	val_104
+104	val_104	104	val_104
+105	val_105	105	val_105
+111	val_111	111	val_111
+113	val_113	113	val_113
+113	val_113	113	val_113
+113	val_113	113	val_113
+113	val_113	113	val_113
+114	val_114	114	val_114
+116	val_116	116	val_116
+118	val_118	118	val_118
+118	val_118	118	val_118
+118	val_118	118	val_118
+118	val_118	118	val_118
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+119	val_119	119	val_119
+120	val_120	120	val_120
+120	val_120	120	val_120
+120	val_120	120	val_120
+120	val_120	120	val_120
+125	val_125	125	val_125
+125	val_125	125	val_125
+125	val_125	125	val_125
+125	val_125	125	val_125
+126	val_126	126	val_126
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+128	val_128	128	val_128
+129	val_129	129	val_129
+129	val_129	129	val_129
+129	val_129	129	val_129
+129	val_129	129	val_129
+131	val_131	131	val_131
+133	val_133	133	val_133
+134	val_134	134	val_134
+134	val_134	134	val_134
+134	val_134	134	val_134
+134	val_134	134	val_134
+136	val_136	136	val_136
+137	val_137	137	val_137
+137	val_137	137	val_137
+137	val_137	137	val_137
+137	val_137	137	val_137
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+138	val_138	138	val_138
+143	val_143	143	val_143
+145	val_145	145	val_145
+146	val_146	146	val_146
+146	val_146	146	val_146
+146	val_146	146	val_146
+146	val_146	146	val_146
+149	val_149	149	val_149
+149	val_149	149	val_149
+149	val_149	149	val_149
+149	val_149	149	val_149
+150	val_150	150	val_150
+152	val_152	152	val_152
+152	val_152	152	val_152
+152	val_152	152	val_152
+152	val_152	152	val_152
+153	val_153	153	val_153
+155	val_155	155	val_155
+156	val_156	156	val_156
+157	val_157	157	val_157
+158	val_158	158	val_158
+160	val_160	160	val_160
+162	val_162	162	val_162
+163	val_163	163	val_163
+164	val_164	164	val_164
+164	val_164	164	val_164
+164	val_164	164	val_164
+164	val_164	164	val_164
+165	val_165	165	val_165
+165	val_165	165	val_165
+165	val_165	165	val_165
+165	val_165	165	val_165
+166	val_166	166	val_166
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+167	val_167	167	val_167
+168	val_168	168	val_168
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+169	val_169	169	val_169
+170	val_170	170	val_170
+172	val_172	172	val_172
+172	val_172	172	val_172
+172	val_172	172	val_172
+172	val_172	172	val_172
+174	val_174	174	val_174
+174	val_174	174	val_174
+174	val_174	174	val_174
+174	val_174	174	val_174
+175	val_175	175	val_175
+175	val_175	175	val_175
+175	val_175	175	val_175
+175	val_175	175	val_175
+176	val_176	176	val_176
+176	val_176	176	val_176
+176	val_176	176	val_176
+176	val_176	176	val_176
+177	val_177	177	val_177
+178	val_178	178	val_178
+179	val_179	179	val_179
+179	val_179	179	val_179
+179	val_179	179	val_179
+179	val_179	179	val_179
+180	val_180	180	val_180
+181	val_181	181	val_181
+183	val_183	183	val_183
+186	val_186	186	val_186
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+187	val_187	187	val_187
+189	val_189	189	val_189
+190	val_190	190	val_190
+191	val_191	191	val_191
+191	val_191	191	val_191
+191	val_191	191	val_191
+191	val_191	191	val_191
+192	val_192	192	val_192
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+193	val_193	193	val_193
+194	val_194	194	val_194
+195	val_195	195	val_195
+195	val_195	195	val_195
+195	val_195	195	val_195
+195	val_195	195	val_195
+196	val_196	196	val_196
+197	val_197	197	val_197
+197	val_197	197	val_197
+197	val_197	197	val_197
+197	val_197	197	val_197
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+199	val_199	199	val_199
+200	val_200	200	val_200
+200	val_200	200	val_200
+200	val_200	200	val_200
+200	val_200	200	val_200
+201	val_201	201	val_201
+202	val_202	202	val_202
+203	val_203	203	val_203
+203	val_203	203	val_203
+203	val_203	203	val_203
+203	val_203	203	val_203
+205	val_205	205	val_205
+205	val_205	205	val_205
+205	val_205	205	val_205
+205	val_205	205	val_205
+207	val_207	207	val_207
+207	val_207	207	val_207
+207	val_207	207	val_207
+207	val_207	207	val_207
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+208	val_208	208	val_208
+209	val_209	209	val_209
+209	val_209	209	val_209
+209	val_209	209	val_209
+209	val_209	209	val_209
+213	val_213	213	val_213
+213	val_213	213	val_213
+213	val_213	213	val_213
+213	val_213	213	val_213
+214	val_214	214	val_214
+216	val_216	216	val_216
+216	val_216	216	val_216
+216	val_216	216	val_216
+216	val_216	216	val_216
+217	val_217	217	val_217
+217	val_217	217	val_217
+217	val_217	217	val_217
+217	val_217	217	val_217
+218	val_218	218	val_218
+219	val_219	219	val_219
+219	val_219	219	val_219
+219	val_219	219	val_219
+219	val_219	219	val_219
+221	val_221	221	val_221
+221	val_221	221	val_221
+221	val_221	221	val_221
+221	val_221	221	val_221
+222	val_222	222	val_222
+223	val_223	223	val_223
+223	val_223	223	val_223
+223	val_223	223	val_223
+223	val_223	223	val_223
+224	val_224	224	val_224
+224	val_224	224	val_224
+224	val_224	224	val_224
+224	val_224	224	val_224
+226	val_226	226	val_226
+228	val_228	228	val_228
+229	val_229	229	val_229
+229	val_229	229	val_229
+229	val_229	229	val_229
+229	val_229	229	val_229
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+230	val_230	230	val_230
+233	val_233	233	val_233
+233	val_233	233	val_233
+233	val_233	233	val_233
+233	val_233	233	val_233
+235	val_235	235	val_235
+237	val_237	237	val_237
+237	val_237	237	val_237
+237	val_237	237	val_237
+237	val_237	237	val_237
+238	val_238	238	val_238
+238	val_238	238	val_238
+238	val_238	238	val_238
+238	val_238	238	val_238
+239	val_239	239	val_239
+239	val_239	239	val_239
+239	val_239	239	val_239
+239	val_239	239	val_239
+241	val_241	241	val_241
+242	val_242	242	val_242
+242	val_242	242	val_242
+242	val_242	242	val_242
+242	val_242	242	val_242
+244	val_244	244	val_244
+247	val_247	247	val_247
+248	val_248	248	val_248
+249	val_249	249	val_249
+252	val_252	252	val_252
+255	val_255	255	val_255
+255	val_255	255	val_255
+255	val_255	255	val_255
+255	val_255	255	val_255
+256	val_256	256	val_256
+256	val_256	256	val_256
+256	val_256	256	val_256
+256	val_256	256	val_256
+257	val_257	257	val_257
+258	val_258	258	val_258
+260	val_260	260	val_260
+262	val_262	262	val_262
+263	val_263	263	val_263
+265	val_265	265	val_265
+265	val_265	265	val_265
+265	val_265	265	val_265
+265	val_265	265	val_265
+266	val_266	266	val_266
+272	val_272	272	val_272
+272	val_272	272	val_272
+272	val_272	272	val_272
+272	val_272	272	val_272
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+273	val_273	273	val_273
+274	val_274	274	val_274
+275	val_275	275	val_275
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+277	val_277	277	val_277
+278	val_278	278	val_278
+278	val_278	278	val_278
+278	val_278	278	val_278
+278	val_278	278	val_278
+280	val_280	280	val_280
+280	val_280	280	val_280
+280	val_280	280	val_280
+280	val_280	280	val_280
+281	val_281	281	val_281
+281	val_281	281	val_281
+281	val_281	281	val_281
+281	val_281	281	val_281
+282	val_282	282	val_282
+282	val_282	282	val_282
+282	val_282	282	val_282
+282	val_282	282	val_282
+283	val_283	283	val_283
+284	val_284	284	val_284
+285	val_285	285	val_285
+286	val_286	286	val_286
+287	val_287	287	val_287
+288	val_288	288	val_288
+288	val_288	288	val_288
+288	val_288	288	val_288
+288	val_288	288	val_288
+289	val_289	289	val_289
+291	val_291	291	val_291
+292	val_292	292	val_292
+296	val_296	296	val_296
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+298	val_298	298	val_298
+302	val_302	302	val_302
+305	val_305	305	val_305
+306	val_306	306	val_306
+307	val_307	307	val_307
+307	val_307	307	val_307
+307	val_307	307	val_307
+307	val_307	307	val_307
+308	val_308	308	val_308
+309	val_309	309	val_309
+309	val_309	309	val_309
+309	val_309	309	val_309
+309	val_309	309	val_309
+310	val_310	310	val_310
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+311	val_311	311	val_311
+315	val_315	315	val_315
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+316	val_316	316	val_316
+317	val_317	317	val_317
+317	val_317	317	val_317
+317	val_317	317	val_317
+317	val_317	317	val_317
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+318	val_318	318	val_318
+321	val_321	321	val_321
+321	val_321	321	val_321
+321	val_321	321	val_321
+321	val_321	321	val_321
+322	val_322	322	val_322
+322	val_322	322	val_322
+322	val_322	322	val_322
+322	val_322	322	val_322
+323	val_323	323	val_323
+325	val_325	325	val_325
+325	val_325	325	val_325
+325	val_325	325	val_325
+325	val_325	325	val_325
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+327	val_327	327	val_327
+331	val_331	331	val_331
+331	val_331	331	val_331
+331	val_331	331	val_331
+331	val_331	331	val_331
+332	val_332	332	val_332
+333	val_333	333	val_333
+333	val_333	333	val_333
+333	val_333	333	val_333
+333	val_333	333	val_333
+335	val_335	335	val_335
+336	val_336	336	val_336
+338	val_338	338	val_338
+339	val_339	339	val_339
+341	val_341	341	val_341
+342	val_342	342	val_342
+342	val_342	342	val_342
+342	val_342	342	val_342
+342	val_342	342	val_342
+344	val_344	344	val_344
+344	val_344	344	val_344
+344	val_344	344	val_344
+344	val_344	344	val_344
+345	val_345	345	val_345
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+348	val_348	348	val_348
+351	val_351	351	val_351
+353	val_353	353	val_353
+353	val_353	353	val_353
+353	val_353	353	val_353
+353	val_353	353	val_353
+356	val_356	356	val_356
+360	val_360	360	val_360
+362	val_362	362	val_362
+364	val_364	364	val_364
+365	val_365	365	val_365
+366	val_366	366	val_366
+367	val_367	367	val_367
+367	val_367	367	val_367
+367	val_367	367	val_367
+367	val_367	367	val_367
+368	val_368	368	val_368
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+369	val_369	369	val_369
+373	val_373	373	val_373
+374	val_374	374	val_374
+375	val_375	375	val_375
+377	val_377	377	val_377
+378	val_378	378	val_378
+379	val_379	379	val_379
+382	val_382	382	val_382
+382	val_382	382	val_382
+382	val_382	382	val_382
+382	val_382	382	val_382
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+384	val_384	384	val_384
+386	val_386	386	val_386
+389	val_389	389	val_389
+392	val_392	392	val_392
+393	val_393	393	val_393
+394	val_394	394	val_394
+395	val_395	395	val_395
+395	val_395	395	val_395
+395	val_395	395	val_395
+395	val_395	395	val_395
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+396	val_396	396	val_396
+397	val_397	397	val_397
+397	val_397	397	val_397
+397	val_397	397	val_397
+397	val_397	397	val_397
+399	val_399	399	val_399
+399	val_399	399	val_399
+399	val_399	399	val_399
+399	val_399	399	val_399
+400	val_400	400	val_400
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+401	val_401	401	val_401
+402	val_402	402	val_402
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+403	val_403	403	val_403
+404	val_404	404	val_404
+404	val_404	404	val_404
+404	val_404	404	val_404
+404	val_404	404	val_404
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+406	val_406	406	val_406
+407	val_407	407	val_407
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+409	val_409	409	val_409
+411	val_411	411	val_411
+413	val_413	413	val_413
+413	val_413	413	val_413
+413	val_413	413	val_413
+413	val_413	413	val_413
+414	val_414	414	val_414
+414	val_414	414	val_414
+414	val_414	414	val_414
+414	val_414	414	val_414
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+417	val_417	417	val_417
+418	val_418	418	val_418
+419	val_419	419	val_419
+421	val_421	421	val_421
+424	val_424	424	val_424
+424	val_424	424	val_424
+424	val_424	424	val_424
+424	val_424	424	val_424
+427	val_427	427	val_427
+429	val_429	429	val_429
+429	val_429	429	val_429
+429	val_429	429	val_429
+429	val_429	429	val_429
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+430	val_430	430	val_430
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+431	val_431	431	val_431
+432	val_432	432	val_432
+435	val_435	435	val_435
+436	val_436	436	val_436
+437	val_437	437	val_437
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+438	val_438	438	val_438
+439	val_439	439	val_439
+439	val_439	439	val_439
+439	val_439	439	val_439
+439	val_439	439	val_439
+443	val_443	443	val_443
+444	val_444	444	val_444
+446	val_446	446	val_446
+448	val_448	448	val_448
+449	val_449	449	val_449
+452	val_452	452	val_452
+453	val_453	453	val_453
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+454	val_454	454	val_454
+455	val_455	455	val_455
+457	val_457	457	val_457
+458	val_458	458	val_458
+458	val_458	458	val_458
+458	val_458	458	val_458
+458	val_458	458	val_458
+459	val_459	459	val_459
+459	val_459	459	val_459
+459	val_459	459	val_459
+459	val_459	459	val_459
+460	val_460	460	val_460
+462	val_462	462	val_462
+462	val_462	462	val_462
+462	val_462	462	val_462
+462	val_462	462	val_462
+463	val_463	463	val_463
+463	val_463	463	val_463
+463	val_463	463	val_463
+463	val_463	463	val_463
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+466	val_466	466	val_466
+467	val_467	467	val_467
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+468	val_468	468	val_468
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+469	val_469	469	val_469
+470	val_470	470	val_470
+472	val_472	472	val_472
+475	val_475	475	val_475
+477	val_477	477	val_477
+478	val_478	478	val_478
+478	val_478	478	val_478
+478	val_478	478	val_478
+478	val_478	478	val_478
+479	val_479	479	val_479
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+480	val_480	480	val_480
+481	val_481	481	val_481
+482	val_482	482	val_482
+483	val_483	483	val_483
+484	val_484	484	val_484
+485	val_485	485	val_485
+487	val_487	487	val_487
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+489	val_489	489	val_489
+490	val_490	490	val_490
+491	val_491	491	val_491
+492	val_492	492	val_492
+492	val_492	492	val_492
+492	val_492	492	val_492
+492	val_492	492	val_492
+493	val_493	493	val_493
+494	val_494	494	val_494
+495	val_495	495	val_495
+496	val_496	496	val_496
+497	val_497	497	val_497
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+498	val_498	498	val_498
+PREHOOK: query: insert overwrite table normal_join_results select * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket4_1
+PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Output: default@normal_join_results
+POSTHOOK: query: insert overwrite table normal_join_results select * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket4_1
+POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Output: default@normal_join_results
+POSTHOOK: Lineage: normal_join_results.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: normal_join_results.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: normal_join_results.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: normal_join_results.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results
+PREHOOK: type: QUERY
+PREHOOK: Input: default@normal_join_results
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from normal_join_results
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@normal_join_results
+#### A masked pattern was here ####
+278697	278697	101852390308	101852390308
+PREHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_join_results
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(k1)) as k1, sum(hash(k2)) as k2, sum(hash(v1)) as v1, sum(hash(v2)) as v2 from smb_join_results
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_join_results
+#### A masked pattern was here ####
+278697	278697	101852390308	101852390308
+PREHOOK: query: explain
+insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 1000) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 1000) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Statistics: Num rows: 182 Data size: 1756 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 182 Data size: 1756 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 182 Data size: 1756 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.smb_join_results
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.smb_join_results
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket4_1
+PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Output: default@smb_join_results
+POSTHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(a)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket4_1
+POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Output: default@smb_join_results
+POSTHOOK: Lineage: smb_join_results.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+insert overwrite table smb_join_results
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table smb_join_results
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 1000) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 1000) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Statistics: Num rows: 182 Data size: 1756 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 182 Data size: 1756 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 182 Data size: 1756 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.smb_join_results
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.smb_join_results
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket4_1
+PREHOOK: Input: default@smb_bucket4_2
+PREHOOK: Output: default@smb_join_results
+POSTHOOK: query: insert overwrite table smb_join_results
+select /*+mapjoin(b)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key where a.key>1000
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket4_1
+POSTHOOK: Input: default@smb_bucket4_2
+POSTHOOK: Output: default@smb_join_results
+POSTHOOK: Lineage: smb_join_results.k1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.k2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.v1 SIMPLE [(smb_bucket4_1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: smb_join_results.v2 SIMPLE [(smb_bucket4_2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key join smb_bucket4_2 c on b.key = c.key where a.key>1000
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key join smb_bucket4_2 c on b.key = c.key where a.key>1000
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 1000) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 1000) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 1000) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 166 Data size: 1597 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+                Statistics: Num rows: 365 Data size: 3513 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                  Statistics: Num rows: 365 Data size: 3513 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 365 Data size: 3513 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key join smb_bucket4_2 c on b.key = c.key where a.key>1000
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_bucket4_1
+PREHOOK: Input: default@smb_bucket4_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a join smb_bucket4_2 b on a.key = b.key join smb_bucket4_2 c on b.key = c.key where a.key>1000
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_bucket4_1
+POSTHOOK: Input: default@smb_bucket4_2
+#### A masked pattern was here ####


[08/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_14.q.out
new file mode 100644
index 0000000..cafdf5d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_14.q.out
@@ -0,0 +1,1709 @@
+PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl1
+PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl2
+PREHOOK: query: insert overwrite table tbl1
+select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: insert overwrite table tbl1
+select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl1
+POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tbl2
+select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: insert overwrite table tbl2
+select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl2
+POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- The mapjoin is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from (
+  select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The mapjoin is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from (
+  select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from (
+  select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (
+  select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+22
+PREHOOK: query: -- The mapjoin is being performed as part of sub-query. It should be converted to a sort-merge join
+-- Add a order by at the end to make the results deterministic.
+explain
+select key, count(*) from 
+(
+  select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+order by key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The mapjoin is being performed as part of sub-query. It should be converted to a sort-merge join
+-- Add a order by at the end to make the results deterministic.
+explain
+select key, count(*) from 
+(
+  select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+order by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: bigint)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select key, count(*) from 
+(
+  select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select key, count(*) from 
+(
+  select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+0	9
+2	1
+4	1
+5	9
+8	1
+9	1
+PREHOOK: query: -- The mapjoin is being performed as part of more than one sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from
+(
+  select key, count(*) from 
+  (
+    select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1
+  group by key
+) subq2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The mapjoin is being performed as part of more than one sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from
+(
+  select key, count(*) from 
+  (
+    select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1
+  group by key
+) subq2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    aggregations: count()
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: bigint)
+        Reducer 4 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from
+(
+  select key, count(*) from 
+  (
+    select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1
+  group by key
+) subq2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(
+  select key, count(*) from 
+  (
+    select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+  ) subq1
+  group by key
+) subq2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+6
+PREHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should 
+-- be converted to a sort-merge join.
+explain
+select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should 
+-- be converted to a sort-merge join.
+explain
+select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should 
+-- be converted to a sort-merge join, although there is more than one level of sub-query
+explain
+select /*+mapjoin(subq2)*/ count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join tbl2 b
+  on subq2.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should 
+-- be converted to a sort-merge join, although there is more than one level of sub-query
+explain
+select /*+mapjoin(subq2)*/ count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join tbl2 b
+  on subq2.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 key (type: int)
+                Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(subq2)*/ count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join tbl2 b
+  on subq2.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(subq2)*/ count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join tbl2 b
+  on subq2.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- Both the big table and the small table are nested sub-queries i.e more then 1 level of sub-query.
+-- The join should be converted to a sort-merge join
+explain
+select /*+mapjoin(subq2)*/ count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq3 
+  where key < 6
+  ) subq4
+  on subq2.key = subq4.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Both the big table and the small table are nested sub-queries i.e more then 1 level of sub-query.
+-- The join should be converted to a sort-merge join
+explain
+select /*+mapjoin(subq2)*/ count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq3 
+  where key < 6
+  ) subq4
+  on subq2.key = subq4.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(subq2)*/ count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq3 
+  where key < 6
+  ) subq4
+  on subq2.key = subq4.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(subq2)*/ count(*) from 
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1 
+  where key < 6
+  ) subq2
+  join
+  (
+  select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq3 
+  where key < 6
+  ) subq4
+  on subq2.key = subq4.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters and the join key
+-- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one 
+-- item, but that is not part of the join key.
+explain
+select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
+    join
+  (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters and the join key
+-- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one 
+-- item, but that is not part of the join key.
+explain
+select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
+    join
+  (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 8) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 8) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
+    join
+  (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 
+    join
+  (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side
+-- join should be performed
+explain
+select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+    join
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side
+-- join should be performed
+explain
+select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+    join
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: (key + 1) (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Filter Operator
+                      predicate: _col0 is not null (type: boolean)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: (key + 1) (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Filter Operator
+                      predicate: _col0 is not null (type: boolean)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+    join
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 
+    join
+  (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+  on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+22
+PREHOOK: query: -- The small table is a sub-query and the big table is not.
+-- It should be converted to a sort-merge join.
+explain
+select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The small table is a sub-query and the big table is not.
+-- It should be converted to a sort-merge join.
+explain
+select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 key (type: int)
+                Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- The big table is a sub-query and the small table is not.
+-- It should be converted to a sort-merge join.
+explain
+select /*+mapjoin(a)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The big table is a sub-query and the small table is not.
+-- It should be converted to a sort-merge join.
+explain
+select /*+mapjoin(a)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 key (type: int)
+                Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join tbl2 a on subq1.key = a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
+-- It should be converted to to a sort-merge join
+explain
+select /*+mapjoin(subq1, subq2)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on (subq1.key = subq2.key)
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+  on (subq1.key = subq3.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
+-- It should be converted to to a sort-merge join
+explain
+select /*+mapjoin(subq1, subq2)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on (subq1.key = subq2.key)
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+  on (subq1.key = subq3.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Inner Join 0 to 2
+                keys:
+                  0 _col0 (type: int)
+                  1 _col0 (type: int)
+                  2 _col0 (type: int)
+                Statistics: Num rows: 6 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(subq1, subq2)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+  on (subq1.key = subq3.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(subq1, subq2)*/ count(*) from 
+  (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+  on subq1.key = subq2.key
+    join
+  (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+  on (subq1.key = subq3.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+56
+PREHOOK: query: -- The mapjoin is being performed on a nested sub-query, and an aggregation is performed after that.
+-- The join should be converted to a sort-merge join
+explain
+select count(*) from (
+  select /*+mapjoin(subq2)*/ subq2.key as key, subq2.value as value1, b.value as value2 from
+  (
+    select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1
+    where key < 6
+  ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The mapjoin is being performed on a nested sub-query, and an aggregation is performed after that.
+-- The join should be converted to a sort-merge join
+explain
+select count(*) from (
+  select /*+mapjoin(subq2)*/ subq2.key as key, subq2.value as value1, b.value as value2 from
+  (
+    select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1
+    where key < 6
+  ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int)
+                  1 key (type: int)
+                Statistics: Num rows: 11 Data size: 77 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from (
+  select /*+mapjoin(subq2)*/ subq2.key as key, subq2.value as value1, b.value as value2 from
+  (
+    select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1
+    where key < 6
+  ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (
+  select /*+mapjoin(subq2)*/ subq2.key as key, subq2.value as value1, b.value as value2 from
+  (
+    select * from
+    (
+      select a.key as key, a.value as value from tbl1 a where key < 8
+    ) subq1
+    where key < 6
+  ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20


[21/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out b/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
new file mode 100644
index 0000000..46d6281
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out
@@ -0,0 +1,2501 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_j1
+PREHOOK: query: CREATE TABLE dest_j2(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_j2
+POSTHOOK: query: CREATE TABLE dest_j2(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_j2
+PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin
+
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE dest_j1
+SELECT x.key, z.value, y.value
+FROM src1 x JOIN src y ON (x.key = y.key) 
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin
+
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE dest_j1
+SELECT x.key, z.value, y.value
+FROM src1 x JOIN src y ON (x.key = y.key) 
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: z
+                  Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 184500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        Estimated key counts: Map 3 => 25
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col1 (type: string)
+                        outputColumnNames: _col0, _col3
+                        input vertices:
+                          1 Map 3
+                        Position of Big Table: 0
+                        Statistics: Num rows: 58 Data size: 10266 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col3 (type: string)
+                          null sort order: a
+                          sort order: +
+                          Map-reduce partition columns: _col3 (type: string)
+                          Statistics: Num rows: 58 Data size: 10266 Basic stats: COMPLETE Column stats: COMPLETE
+                          tag: 0
+                          value expressions: _col0 (type: string)
+                          auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: hr=11
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                    hr 11
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.srcpart
+                    numFiles 1
+                    numRows 500
+                    partition_columns ds/hr
+                    partition_columns.types string:string
+                    rawDataSize 5312
+                    serialization.ddl struct srcpart { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.srcpart
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct srcpart { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcpart
+                  name: default.srcpart
+            Truncated Path -> Alias:
+              /srcpart/ds=2008-04-08/hr=11 [z]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: string)
+                        Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: 1
+                        value expressions: _col0 (type: string)
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src1
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src1
+                    numFiles 1
+                    numRows 25
+                    rawDataSize 191
+                    serialization.ddl struct src1 { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 216
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src1
+                      numFiles 1
+                      numRows 25
+                      rawDataSize 191
+                      serialization.ddl struct src1 { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 216
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src1
+                  name: default.src1
+            Truncated Path -> Alias:
+              /src1 [x]
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: 1
+                        value expressions: _col1 (type: string)
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    numFiles 1
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct src { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [y]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col3 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col3, _col6
+                Position of Big Table: 1
+                Statistics: Num rows: 141 Data size: 37788 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col3 (type: string), _col0 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 141 Data size: 37788 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 141 Data size: 37788 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value,val2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.dest_j1
+                          numFiles 0
+                          numRows 0
+                          rawDataSize 0
+                          serialization.ddl struct dest_j1 { string key, string value, string val2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 0
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest_j1
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value,val2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.dest_j1
+                numFiles 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct dest_j1 { string key, string value, string val2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 0
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest_j1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
+SELECT x.key, z.value, y.value
+FROM src1 x JOIN src y ON (x.key = y.key) 
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1
+SELECT x.key, z.value, y.value
+FROM src1 x JOIN src y ON (x.key = y.key) 
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@dest_j1
+POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest_j1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+146	val_146	val_146
+146	val_146	val_146
+146	val_146	val_146
+146	val_146	val_146
+150	val_150	val_150
+213	val_213	val_213
+213	val_213	val_213
+213	val_213	val_213
+213	val_213	val_213
+238	val_238	val_238
+238	val_238	val_238
+238	val_238	val_238
+238	val_238	val_238
+255	val_255	val_255
+255	val_255	val_255
+255	val_255	val_255
+255	val_255	val_255
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+278	val_278	val_278
+278	val_278	val_278
+278	val_278	val_278
+278	val_278	val_278
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+66	val_66	val_66
+98	val_98	val_98
+98	val_98	val_98
+98	val_98	val_98
+98	val_98	val_98
+PREHOOK: query: EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE dest_j1
+SELECT x.key, z.value, y.value
+FROM src w JOIN src1 x ON (x.value = w.value) 
+JOIN src y ON (x.key = y.key) 
+JOIN src1 z ON (x.key = z.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE dest_j1
+SELECT x.key, z.value, y.value
+FROM src w JOIN src1 x ON (x.value = w.value) 
+JOIN src y ON (x.key = y.key) 
+JOIN src1 z ON (x.key = z.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Map 6 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (value is not null and key is not null) (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        Estimated key counts: Map 4 => 25
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col0 (type: string)
+                        outputColumnNames: _col0, _col1, _col3
+                        input vertices:
+                          1 Map 4
+                        Position of Big Table: 0
+                        Statistics: Num rows: 44 Data size: 11616 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col1 (type: string)
+                          null sort order: a
+                          sort order: +
+                          Map-reduce partition columns: _col1 (type: string)
+                          Statistics: Num rows: 44 Data size: 11616 Basic stats: COMPLETE Column stats: COMPLETE
+                          tag: 0
+                          value expressions: _col0 (type: string), _col3 (type: string)
+                          auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src1
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src1
+                    numFiles 1
+                    numRows 25
+                    rawDataSize 191
+                    serialization.ddl struct src1 { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 216
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src1
+                      numFiles 1
+                      numRows 25
+                      rawDataSize 191
+                      serialization.ddl struct src1 { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 216
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src1
+                  name: default.src1
+            Truncated Path -> Alias:
+              /src1 [x]
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: z
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: 1
+                        value expressions: _col1 (type: string)
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src1
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src1
+                    numFiles 1
+                    numRows 25
+                    rawDataSize 191
+                    serialization.ddl struct src1 { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 216
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src1
+                      numFiles 1
+                      numRows 25
+                      rawDataSize 191
+                      serialization.ddl struct src1 { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 216
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src1
+                  name: default.src1
+            Truncated Path -> Alias:
+              /src1 [z]
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: w
+                  Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: 1
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    numFiles 1
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct src { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [w]
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: 1
+                        value expressions: _col1 (type: string)
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    numFiles 1
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct src { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [y]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col1 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col3
+                Position of Big Table: 1
+                Statistics: Num rows: 102 Data size: 17850 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 102 Data size: 17850 Basic stats: COMPLETE Column stats: COMPLETE
+                  tag: 0
+                  value expressions: _col3 (type: string)
+                  auto parallelism: true
+        Reducer 3 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col3, _col6
+                Position of Big Table: 0
+                Statistics: Num rows: 248 Data size: 65968 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string), _col3 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 248 Data size: 65968 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 248 Data size: 65968 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value,val2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.dest_j1
+                          numFiles 1
+                          numRows 85
+                          rawDataSize 1600
+                          serialization.ddl struct dest_j1 { string key, string value, string val2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 1685
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest_j1
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value,val2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.dest_j1
+                numFiles 1
+                numRows 85
+                rawDataSize 1600
+                serialization.ddl struct dest_j1 { string key, string value, string val2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 1685
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest_j1
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j1
+SELECT x.key, z.value, y.value
+FROM src w JOIN src1 x ON (x.value = w.value) 
+JOIN src y ON (x.key = y.key) 
+JOIN src1 z ON (x.key = z.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j1
+SELECT x.key, z.value, y.value
+FROM src w JOIN src1 x ON (x.value = w.value) 
+JOIN src y ON (x.key = y.key) 
+JOIN src1 z ON (x.key = z.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@dest_j1
+POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value SIMPLE [(src1)z.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest_j1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+146	val_146	val_146
+146	val_146	val_146
+146	val_146	val_146
+146	val_146	val_146
+150	val_150	val_150
+213	val_213	val_213
+213	val_213	val_213
+213	val_213	val_213
+213	val_213	val_213
+238	val_238	val_238
+238	val_238	val_238
+238	val_238	val_238
+238	val_238	val_238
+255	val_255	val_255
+255	val_255	val_255
+255	val_255	val_255
+255	val_255	val_255
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+278	val_278	val_278
+278	val_278	val_278
+278	val_278	val_278
+278	val_278	val_278
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+66	val_66	val_66
+98	val_98	val_98
+98	val_98	val_98
+98	val_98	val_98
+98	val_98	val_98
+PREHOOK: query: EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE dest_j2
+SELECT res.key, z.value, res.value
+FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
+JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE dest_j2
+SELECT res.key, z.value, res.value
+FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
+JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 3 <- Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: z
+                  Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 184500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 184500 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: 0
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: hr=11
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                    hr 11
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.srcpart
+                    numFiles 1
+                    numRows 500
+                    partition_columns ds/hr
+                    partition_columns.types string:string
+                    rawDataSize 5312
+                    serialization.ddl struct srcpart { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.srcpart
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct srcpart { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcpart
+                  name: default.srcpart
+            Truncated Path -> Alias:
+              /srcpart/ds=2008-04-08/hr=11 [z]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        Estimated key counts: Map 4 => 25
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col0 (type: string)
+                        outputColumnNames: _col1, _col2
+                        input vertices:
+                          1 Map 4
+                        Position of Big Table: 0
+                        Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                        Select Operator
+                          expressions: _col1 (type: string), _col2 (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            key expressions: _col1 (type: string)
+                            null sort order: a
+                            sort order: +
+                            Map-reduce partition columns: _col1 (type: string)
+                            Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                            tag: 1
+                            value expressions: _col0 (type: string)
+                            auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    numFiles 1
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct src { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [y]
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: 1
+                        value expressions: _col1 (type: string)
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src1
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src1
+                    numFiles 1
+                    numRows 25
+                    rawDataSize 191
+                    serialization.ddl struct src1 { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 216
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src1
+                      numFiles 1
+                      numRows 25
+                      rawDataSize 191
+                      serialization.ddl struct src1 { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 216
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src1
+                  name: default.src1
+            Truncated Path -> Alias:
+              /src1 [x]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col1 (type: string)
+                outputColumnNames: _col0, _col3, _col4
+                Position of Big Table: 0
+                Statistics: Num rows: 140 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 140 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 140 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value,val2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.dest_j2
+                          numFiles 0
+                          numRows 0
+                          rawDataSize 0
+                          serialization.ddl struct dest_j2 { string key, string value, string val2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 0
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest_j2
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value,val2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.dest_j2
+                numFiles 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct dest_j2 { string key, string value, string val2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 0
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest_j2
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j2
+SELECT res.key, z.value, res.value
+FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
+JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Output: default@dest_j2
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j2
+SELECT res.key, z.value, res.value
+FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
+JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@dest_j2
+POSTHOOK: Lineage: dest_j2.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j2.val2 SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j2.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_j2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest_j2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_j2
+#### A masked pattern was here ####
+146	val_146	val_146
+146	val_146	val_146
+146	val_146	val_146
+146	val_146	val_146
+150	val_150	val_150
+213	val_213	val_213
+213	val_213	val_213
+213	val_213	val_213
+213	val_213	val_213
+238	val_238	val_238
+238	val_238	val_238
+238	val_238	val_238
+238	val_238	val_238
+255	val_255	val_255
+255	val_255	val_255
+255	val_255	val_255
+255	val_255	val_255
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+278	val_278	val_278
+278	val_278	val_278
+278	val_278	val_278
+278	val_278	val_278
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+66	val_66	val_66
+98	val_98	val_98
+98	val_98	val_98
+98	val_98	val_98
+98	val_98	val_98
+PREHOOK: query: EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE dest_j2
+SELECT res.key, z.value, res.value
+FROM (select x.key, x.value from src1 x LEFT OUTER JOIN src y ON (x.key = y.key)) res 
+JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE dest_j2
+SELECT res.key, z.value, res.value
+FROM (select x.key, x.value from src1 x LEFT OUTER JOIN src y ON (x.key = y.key)) res 
+JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE)
+        Reducer 4 <- Map 3 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: z
+                  Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 184500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 184500 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: 0
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: hr=11
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                    hr 11
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.srcpart
+                    numFiles 1
+                    numRows 500
+                    partition_columns ds/hr
+                    partition_columns.types string:string
+                    rawDataSize 5312
+                    serialization.ddl struct srcpart { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.srcpart
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct srcpart { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcpart
+                  name: default.srcpart
+            Truncated Path -> Alias:
+              /srcpart/ds=2008-04-08/hr=11 [z]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                      tag: 0
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    numFiles 1
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct src { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [y]
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 25 Data size: 4375 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: 1
+                        value expressions: _col1 (type: string)
+                        auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src1
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src1
+                    numFiles 1
+                    numRows 25
+                    rawDataSize 191
+                    serialization.ddl struct src1 { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 216
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src1
+                      numFiles 1
+                      numRows 25
+                      rawDataSize 191
+                      serialization.ddl struct src1 { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 216
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src1
+                  name: default.src1
+            Truncated Path -> Alias:
+              /src1 [x]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col1 (type: string)
+                outputColumnNames: _col0, _col3, _col4
+                Position of Big Table: 0
+                Statistics: Num rows: 140 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col3 (type: string), _col0 (type: string), _col4 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 140 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 140 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value,val2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.dest_j2
+                          numFiles 1
+                          numRows 85
+                          rawDataSize 1600
+                          serialization.ddl struct dest_j2 { string key, string value, string val2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 1685
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.dest_j2
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+        Reducer 4 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Right Outer Join0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1, _col2
+                Position of Big Table: 1
+                Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col1 (type: string), _col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: _col1 (type: string)
+                    null sort order: a
+                    sort order: +
+                    Map-reduce partition columns: _col1 (type: string)
+                    Statistics: Num rows: 60 Data size: 10500 Basic stats: COMPLETE Column stats: COMPLETE
+                    tag: 1
+                    value expressions: _col0 (type: string)
+                    auto parallelism: true
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value,val2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.dest_j2
+                numFiles 1
+                numRows 85
+                rawDataSize 1600
+                serialization.ddl struct dest_j2 { string key, string value, string val2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 1685
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.dest_j2
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE dest_j2
+SELECT res.key, z.value, res.value
+FROM (select x.key, x.value from src1 x LEFT OUTER JOIN src y ON (x.key = y.key)) res 
+JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Output: default@dest_j2
+POSTHOOK: query: INSERT OVERWRITE TABLE dest_j2
+SELECT res.key, z.value, res.value
+FROM (select x.key, x.value from src1 x LEFT OUTER JOIN src y ON (x.key = y.key)) res 
+JOIN srcpart z ON (res.value = z.value and z.ds='2008-04-08' and z.hr=11)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@dest_j2
+POSTHOOK: Lineage: dest_j2.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j2.val2 SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j2.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from dest_j2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_j2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest_j2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_j2
+#### A masked pattern was here ####
+	val_165	val_165
+	val_165	val_165
+	val_193	val_193
+	val_193	val_193
+	val_193	val_193
+	val_265	val_265
+	val_265	val_265
+	val_27	val_27
+	val_409	val_409
+	val_409	val_409
+	val_409	val_409
+	val_484	val_484
+146	val_146	val_146
+146	val_146	val_146
+146	val_146	val_146
+146	val_146	val_146
+150	val_150	val_150
+213	val_213	val_213
+213	val_213	val_213
+213	val_213	val_213
+213	val_213	val_213
+238	val_238	val_238
+238	val_238	val_238
+238	val_238	val_238
+238	val_238	val_238
+255	val_255	val_255
+255	val_255	val_255
+255	val_255	val_255
+255	val_255	val_255
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+273	val_273	val_273
+278	val_278	val_278
+278	val_278	val_278
+278	val_278	val_278
+278	val_278	val_278
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+311	val_311	val_311
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+401	val_401	val_401
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+406	val_406	val_406
+66	val_66	val_66
+98	val_98	val_98
+98	val_98	val_98
+98	val_98	val_98
+98	val_98	val_98
+PREHOOK: query: EXPLAIN
+INSERT OVERWRITE TABLE dest_j2
+SELECT res.key, x.value, res.value  
+FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
+JOIN srcpart x ON (res.value = x.value and x.ds='2008-04-08' and x.hr=11)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+INSERT OVERWRITE TABLE dest_j2
+SELECT res.key, x.value, res.value  
+FROM (select x.key, x.value from src1 x JOIN src y ON (x.key = y.key)) res 
+JOIN srcpart x ON (res.value = x.value and x.ds='2008-04-08' and x.hr=11)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 3 <- Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: x
+                  Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 184500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 184500 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: y
+                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size

<TRUNCATED>

[32/35] hive git commit: HIVE-14921: Move slow CliDriver tests to MiniLlap - part 2 (Prasanth Jayachandran reviewed by Siddharth Seth)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f562dfb5/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out
new file mode 100644
index 0000000..e9bb701
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out
@@ -0,0 +1,1068 @@
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+PREHOOK: query: -- empty partitions (HIVE-3205)
+explain extended
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: -- empty partitions (HIVE-3205)
+explain extended
+select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: unknown
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (key is not null and (ds = '2008-04-08')) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: unknown
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col7
+                Position of Big Table: 1
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2
+                          columns.types int:string:string
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.escape.crlf true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(b)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+#### A masked pattern was here ####
+PREHOOK: query: explain extended
+select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: unknown
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: (key is not null and (ds = '2008-04-08')) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: PARTIAL
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: unknown
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col7
+                Position of Big Table: 1
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2
+                          columns.types int:string:string
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.escape.crlf true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part_2
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+mapjoin(a)*/ a.key, a.value, b.value
+from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part_2
+#### A masked pattern was here ####
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08
+PREHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+PREHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+PREHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: create table bucketmapjoin_tmp_result (key string , value1 string, value2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: srcbucket_mapjoin
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin
+                    numFiles 2
+                    numRows 0
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 2750
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin
+                      numFiles 2
+                      numRows 0
+                      rawDataSize 0
+                      serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 2750
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin
+                  name: default.srcbucket_mapjoin
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col6
+                Position of Big Table: 1
+                Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 0
+                          numRows 0
+                          rawDataSize 0
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 0
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 0
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(b)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0
+PREHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 26 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: srcbucket_mapjoin
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin
+                    numFiles 2
+                    numRows 0
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 2750
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 2
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin
+                      numFiles 2
+                      numRows 0
+                      rawDataSize 0
+                      serialization.ddl struct srcbucket_mapjoin { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 2750
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin
+                  name: default.srcbucket_mapjoin
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin [a]
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 55 Data size: 6252 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: value (type: string)
+                      auto parallelism: true
+            Execution mode: llap
+            LLAP IO: no inputs
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: ds=2008-04-08
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                  properties:
+                    bucket_count 4
+                    bucket_field_name key
+                    columns key,value
+                    columns.comments 
+                    columns.types int:string
+#### A masked pattern was here ####
+                    name default.srcbucket_mapjoin_part
+                    numFiles 4
+                    numRows 0
+                    partition_columns ds
+                    partition_columns.types string
+                    rawDataSize 0
+                    serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count 4
+                      bucket_field_name key
+                      columns key,value
+                      columns.comments 
+                      columns.types int:string
+#### A masked pattern was here ####
+                      name default.srcbucket_mapjoin_part
+                      partition_columns ds
+                      partition_columns.types string
+                      serialization.ddl struct srcbucket_mapjoin_part { i32 key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.srcbucket_mapjoin_part
+                  name: default.srcbucket_mapjoin_part
+            Truncated Path -> Alias:
+              /srcbucket_mapjoin_part/ds=2008-04-08 [b]
+        Reducer 2 
+            Execution mode: llap
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                outputColumnNames: _col0, _col1, _col6
+                Position of Big Table: 1
+                Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 60 Data size: 6877 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                          bucket_count -1
+                          columns key,value1,value2
+                          columns.comments 
+                          columns.types string:string:string
+#### A masked pattern was here ####
+                          name default.bucketmapjoin_tmp_result
+                          numFiles 1
+                          numRows 464
+                          rawDataSize 8519
+                          serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 8983
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.bucketmapjoin_tmp_result
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+                bucket_count -1
+                columns key,value1,value2
+                columns.comments 
+                columns.types string:string:string
+#### A masked pattern was here ####
+                name default.bucketmapjoin_tmp_result
+                numFiles 1
+                numRows 464
+                rawDataSize 8519
+                serialization.ddl struct bucketmapjoin_tmp_result { string key, string value1, string value2}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 8983
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucketmapjoin_tmp_result
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_1
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_1
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_1.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: query: insert overwrite table bucketmapjoin_tmp_result 
+select /*+mapjoin(a)*/ a.key, a.value, b.value 
+from srcbucket_mapjoin a join srcbucket_mapjoin_part b 
+on a.key=b.key where b.ds="2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@bucketmapjoin_tmp_result
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+#### A masked pattern was here ####
+464
+PREHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_tmp_result
+PREHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: query: insert overwrite table bucketmapjoin_hash_result_2
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from bucketmapjoin_tmp_result
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_tmp_result
+POSTHOOK: Output: default@bucketmapjoin_hash_result_2
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.key EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value1 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value1, type:string, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_hash_result_2.value2 EXPRESSION [(bucketmapjoin_tmp_result)bucketmapjoin_tmp_result.FieldSchema(name:value2, type:string, comment:null), ]
+PREHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucketmapjoin_hash_result_1
+PREHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key-b.key, a.value1-b.value1, a.value2-b.value2
+from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
+on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucketmapjoin_hash_result_1
+POSTHOOK: Input: default@bucketmapjoin_hash_result_2
+#### A masked pattern was here ####
+0	0	0