You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by pr...@apache.org on 2015/09/15 22:42:07 UTC
[01/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Repository: hive
Updated Branches:
refs/heads/llap 75b1e6d42 -> ace87818b
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/sample1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/sample1.q.out b/ql/src/test/results/clientpositive/llap/sample1.q.out
new file mode 100644
index 0000000..68eb0d9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/sample1.q.out
@@ -0,0 +1,727 @@
+PREHOOK: query: CREATE TABLE dest1(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING, dt STRING, hr STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: -- no input pruning, no sample filter
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE dest1 SELECT s.*
+FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
+WHERE s.ds='2008-04-08' and s.hr='11'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- no input pruning, no sample filter
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE dest1 SELECT s.*
+FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
+WHERE s.ds='2008-04-08' and s.hr='11'
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_TABREF
+ TOK_TABNAME
+ srcpart
+ TOK_TABLEBUCKETSAMPLE
+ 1
+ 1
+ TOK_FUNCTION
+ rand
+ s
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_TAB
+ TOK_TABNAME
+ dest1
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_ALLCOLREF
+ TOK_TABNAME
+ s
+ TOK_WHERE
+ and
+ =
+ .
+ TOK_TABLE_OR_COL
+ s
+ ds
+ '2008-04-08'
+ =
+ .
+ TOK_TABLE_OR_COL
+ s
+ hr
+ '11'
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: true
+ predicate: (((hash(rand()) & 2147483647) % 1) = 0) (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(key) (type: int), value (type: string), '2008-04-08' (type: string), '11' (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value,dt,hr
+ columns.comments
+ columns.types int:string:string:string
+#### A masked pattern was here ####
+ name default.dest1
+ serialization.ddl struct dest1 { i32 key, string value, string dt, string hr}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+ TotalFiles: 1
+ GatherStats: true
+ MultiFileSpray: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=11
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ hr 11
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+ Truncated Path -> Alias:
+ /srcpart/ds=2008-04-08/hr=11 [s]
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value,dt,hr
+ columns.comments
+ columns.types int:string:string:string
+#### A masked pattern was here ####
+ name default.dest1
+ serialization.ddl struct dest1 { i32 key, string value, string dt, string hr}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*
+FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
+WHERE s.ds='2008-04-08' and s.hr='11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Output: default@dest1
+POSTHOOK: query: INSERT OVERWRITE TABLE dest1 SELECT s.*
+FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
+WHERE s.ds='2008-04-08' and s.hr='11'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.dt SIMPLE [(srcpart)s.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1.hr SIMPLE [(srcpart)s.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: dest1.key EXPRESSION [(srcpart)s.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value SIMPLE [(srcpart)s.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1.* FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+238 val_238 2008-04-08 11
+86 val_86 2008-04-08 11
+311 val_311 2008-04-08 11
+27 val_27 2008-04-08 11
+165 val_165 2008-04-08 11
+409 val_409 2008-04-08 11
+255 val_255 2008-04-08 11
+278 val_278 2008-04-08 11
+98 val_98 2008-04-08 11
+484 val_484 2008-04-08 11
+265 val_265 2008-04-08 11
+193 val_193 2008-04-08 11
+401 val_401 2008-04-08 11
+150 val_150 2008-04-08 11
+273 val_273 2008-04-08 11
+224 val_224 2008-04-08 11
+369 val_369 2008-04-08 11
+66 val_66 2008-04-08 11
+128 val_128 2008-04-08 11
+213 val_213 2008-04-08 11
+146 val_146 2008-04-08 11
+406 val_406 2008-04-08 11
+429 val_429 2008-04-08 11
+374 val_374 2008-04-08 11
+152 val_152 2008-04-08 11
+469 val_469 2008-04-08 11
+145 val_145 2008-04-08 11
+495 val_495 2008-04-08 11
+37 val_37 2008-04-08 11
+327 val_327 2008-04-08 11
+281 val_281 2008-04-08 11
+277 val_277 2008-04-08 11
+209 val_209 2008-04-08 11
+15 val_15 2008-04-08 11
+82 val_82 2008-04-08 11
+403 val_403 2008-04-08 11
+166 val_166 2008-04-08 11
+417 val_417 2008-04-08 11
+430 val_430 2008-04-08 11
+252 val_252 2008-04-08 11
+292 val_292 2008-04-08 11
+219 val_219 2008-04-08 11
+287 val_287 2008-04-08 11
+153 val_153 2008-04-08 11
+193 val_193 2008-04-08 11
+338 val_338 2008-04-08 11
+446 val_446 2008-04-08 11
+459 val_459 2008-04-08 11
+394 val_394 2008-04-08 11
+237 val_237 2008-04-08 11
+482 val_482 2008-04-08 11
+174 val_174 2008-04-08 11
+413 val_413 2008-04-08 11
+494 val_494 2008-04-08 11
+207 val_207 2008-04-08 11
+199 val_199 2008-04-08 11
+466 val_466 2008-04-08 11
+208 val_208 2008-04-08 11
+174 val_174 2008-04-08 11
+399 val_399 2008-04-08 11
+396 val_396 2008-04-08 11
+247 val_247 2008-04-08 11
+417 val_417 2008-04-08 11
+489 val_489 2008-04-08 11
+162 val_162 2008-04-08 11
+377 val_377 2008-04-08 11
+397 val_397 2008-04-08 11
+309 val_309 2008-04-08 11
+365 val_365 2008-04-08 11
+266 val_266 2008-04-08 11
+439 val_439 2008-04-08 11
+342 val_342 2008-04-08 11
+367 val_367 2008-04-08 11
+325 val_325 2008-04-08 11
+167 val_167 2008-04-08 11
+195 val_195 2008-04-08 11
+475 val_475 2008-04-08 11
+17 val_17 2008-04-08 11
+113 val_113 2008-04-08 11
+155 val_155 2008-04-08 11
+203 val_203 2008-04-08 11
+339 val_339 2008-04-08 11
+0 val_0 2008-04-08 11
+455 val_455 2008-04-08 11
+128 val_128 2008-04-08 11
+311 val_311 2008-04-08 11
+316 val_316 2008-04-08 11
+57 val_57 2008-04-08 11
+302 val_302 2008-04-08 11
+205 val_205 2008-04-08 11
+149 val_149 2008-04-08 11
+438 val_438 2008-04-08 11
+345 val_345 2008-04-08 11
+129 val_129 2008-04-08 11
+170 val_170 2008-04-08 11
+20 val_20 2008-04-08 11
+489 val_489 2008-04-08 11
+157 val_157 2008-04-08 11
+378 val_378 2008-04-08 11
+221 val_221 2008-04-08 11
+92 val_92 2008-04-08 11
+111 val_111 2008-04-08 11
+47 val_47 2008-04-08 11
+72 val_72 2008-04-08 11
+4 val_4 2008-04-08 11
+280 val_280 2008-04-08 11
+35 val_35 2008-04-08 11
+427 val_427 2008-04-08 11
+277 val_277 2008-04-08 11
+208 val_208 2008-04-08 11
+356 val_356 2008-04-08 11
+399 val_399 2008-04-08 11
+169 val_169 2008-04-08 11
+382 val_382 2008-04-08 11
+498 val_498 2008-04-08 11
+125 val_125 2008-04-08 11
+386 val_386 2008-04-08 11
+437 val_437 2008-04-08 11
+469 val_469 2008-04-08 11
+192 val_192 2008-04-08 11
+286 val_286 2008-04-08 11
+187 val_187 2008-04-08 11
+176 val_176 2008-04-08 11
+54 val_54 2008-04-08 11
+459 val_459 2008-04-08 11
+51 val_51 2008-04-08 11
+138 val_138 2008-04-08 11
+103 val_103 2008-04-08 11
+239 val_239 2008-04-08 11
+213 val_213 2008-04-08 11
+216 val_216 2008-04-08 11
+430 val_430 2008-04-08 11
+278 val_278 2008-04-08 11
+176 val_176 2008-04-08 11
+289 val_289 2008-04-08 11
+221 val_221 2008-04-08 11
+65 val_65 2008-04-08 11
+318 val_318 2008-04-08 11
+332 val_332 2008-04-08 11
+311 val_311 2008-04-08 11
+275 val_275 2008-04-08 11
+137 val_137 2008-04-08 11
+241 val_241 2008-04-08 11
+83 val_83 2008-04-08 11
+333 val_333 2008-04-08 11
+180 val_180 2008-04-08 11
+284 val_284 2008-04-08 11
+12 val_12 2008-04-08 11
+230 val_230 2008-04-08 11
+181 val_181 2008-04-08 11
+67 val_67 2008-04-08 11
+260 val_260 2008-04-08 11
+404 val_404 2008-04-08 11
+384 val_384 2008-04-08 11
+489 val_489 2008-04-08 11
+353 val_353 2008-04-08 11
+373 val_373 2008-04-08 11
+272 val_272 2008-04-08 11
+138 val_138 2008-04-08 11
+217 val_217 2008-04-08 11
+84 val_84 2008-04-08 11
+348 val_348 2008-04-08 11
+466 val_466 2008-04-08 11
+58 val_58 2008-04-08 11
+8 val_8 2008-04-08 11
+411 val_411 2008-04-08 11
+230 val_230 2008-04-08 11
+208 val_208 2008-04-08 11
+348 val_348 2008-04-08 11
+24 val_24 2008-04-08 11
+463 val_463 2008-04-08 11
+431 val_431 2008-04-08 11
+179 val_179 2008-04-08 11
+172 val_172 2008-04-08 11
+42 val_42 2008-04-08 11
+129 val_129 2008-04-08 11
+158 val_158 2008-04-08 11
+119 val_119 2008-04-08 11
+496 val_496 2008-04-08 11
+0 val_0 2008-04-08 11
+322 val_322 2008-04-08 11
+197 val_197 2008-04-08 11
+468 val_468 2008-04-08 11
+393 val_393 2008-04-08 11
+454 val_454 2008-04-08 11
+100 val_100 2008-04-08 11
+298 val_298 2008-04-08 11
+199 val_199 2008-04-08 11
+191 val_191 2008-04-08 11
+418 val_418 2008-04-08 11
+96 val_96 2008-04-08 11
+26 val_26 2008-04-08 11
+165 val_165 2008-04-08 11
+327 val_327 2008-04-08 11
+230 val_230 2008-04-08 11
+205 val_205 2008-04-08 11
+120 val_120 2008-04-08 11
+131 val_131 2008-04-08 11
+51 val_51 2008-04-08 11
+404 val_404 2008-04-08 11
+43 val_43 2008-04-08 11
+436 val_436 2008-04-08 11
+156 val_156 2008-04-08 11
+469 val_469 2008-04-08 11
+468 val_468 2008-04-08 11
+308 val_308 2008-04-08 11
+95 val_95 2008-04-08 11
+196 val_196 2008-04-08 11
+288 val_288 2008-04-08 11
+481 val_481 2008-04-08 11
+457 val_457 2008-04-08 11
+98 val_98 2008-04-08 11
+282 val_282 2008-04-08 11
+197 val_197 2008-04-08 11
+187 val_187 2008-04-08 11
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 11
+409 val_409 2008-04-08 11
+470 val_470 2008-04-08 11
+137 val_137 2008-04-08 11
+369 val_369 2008-04-08 11
+316 val_316 2008-04-08 11
+169 val_169 2008-04-08 11
+413 val_413 2008-04-08 11
+85 val_85 2008-04-08 11
+77 val_77 2008-04-08 11
+0 val_0 2008-04-08 11
+490 val_490 2008-04-08 11
+87 val_87 2008-04-08 11
+364 val_364 2008-04-08 11
+179 val_179 2008-04-08 11
+118 val_118 2008-04-08 11
+134 val_134 2008-04-08 11
+395 val_395 2008-04-08 11
+282 val_282 2008-04-08 11
+138 val_138 2008-04-08 11
+238 val_238 2008-04-08 11
+419 val_419 2008-04-08 11
+15 val_15 2008-04-08 11
+118 val_118 2008-04-08 11
+72 val_72 2008-04-08 11
+90 val_90 2008-04-08 11
+307 val_307 2008-04-08 11
+19 val_19 2008-04-08 11
+435 val_435 2008-04-08 11
+10 val_10 2008-04-08 11
+277 val_277 2008-04-08 11
+273 val_273 2008-04-08 11
+306 val_306 2008-04-08 11
+224 val_224 2008-04-08 11
+309 val_309 2008-04-08 11
+389 val_389 2008-04-08 11
+327 val_327 2008-04-08 11
+242 val_242 2008-04-08 11
+369 val_369 2008-04-08 11
+392 val_392 2008-04-08 11
+272 val_272 2008-04-08 11
+331 val_331 2008-04-08 11
+401 val_401 2008-04-08 11
+242 val_242 2008-04-08 11
+452 val_452 2008-04-08 11
+177 val_177 2008-04-08 11
+226 val_226 2008-04-08 11
+5 val_5 2008-04-08 11
+497 val_497 2008-04-08 11
+402 val_402 2008-04-08 11
+396 val_396 2008-04-08 11
+317 val_317 2008-04-08 11
+395 val_395 2008-04-08 11
+58 val_58 2008-04-08 11
+35 val_35 2008-04-08 11
+336 val_336 2008-04-08 11
+95 val_95 2008-04-08 11
+11 val_11 2008-04-08 11
+168 val_168 2008-04-08 11
+34 val_34 2008-04-08 11
+229 val_229 2008-04-08 11
+233 val_233 2008-04-08 11
+143 val_143 2008-04-08 11
+472 val_472 2008-04-08 11
+322 val_322 2008-04-08 11
+498 val_498 2008-04-08 11
+160 val_160 2008-04-08 11
+195 val_195 2008-04-08 11
+42 val_42 2008-04-08 11
+321 val_321 2008-04-08 11
+430 val_430 2008-04-08 11
+119 val_119 2008-04-08 11
+489 val_489 2008-04-08 11
+458 val_458 2008-04-08 11
+78 val_78 2008-04-08 11
+76 val_76 2008-04-08 11
+41 val_41 2008-04-08 11
+223 val_223 2008-04-08 11
+492 val_492 2008-04-08 11
+149 val_149 2008-04-08 11
+449 val_449 2008-04-08 11
+218 val_218 2008-04-08 11
+228 val_228 2008-04-08 11
+138 val_138 2008-04-08 11
+453 val_453 2008-04-08 11
+30 val_30 2008-04-08 11
+209 val_209 2008-04-08 11
+64 val_64 2008-04-08 11
+468 val_468 2008-04-08 11
+76 val_76 2008-04-08 11
+74 val_74 2008-04-08 11
+342 val_342 2008-04-08 11
+69 val_69 2008-04-08 11
+230 val_230 2008-04-08 11
+33 val_33 2008-04-08 11
+368 val_368 2008-04-08 11
+103 val_103 2008-04-08 11
+296 val_296 2008-04-08 11
+113 val_113 2008-04-08 11
+216 val_216 2008-04-08 11
+367 val_367 2008-04-08 11
+344 val_344 2008-04-08 11
+167 val_167 2008-04-08 11
+274 val_274 2008-04-08 11
+219 val_219 2008-04-08 11
+239 val_239 2008-04-08 11
+485 val_485 2008-04-08 11
+116 val_116 2008-04-08 11
+223 val_223 2008-04-08 11
+256 val_256 2008-04-08 11
+263 val_263 2008-04-08 11
+70 val_70 2008-04-08 11
+487 val_487 2008-04-08 11
+480 val_480 2008-04-08 11
+401 val_401 2008-04-08 11
+288 val_288 2008-04-08 11
+191 val_191 2008-04-08 11
+5 val_5 2008-04-08 11
+244 val_244 2008-04-08 11
+438 val_438 2008-04-08 11
+128 val_128 2008-04-08 11
+467 val_467 2008-04-08 11
+432 val_432 2008-04-08 11
+202 val_202 2008-04-08 11
+316 val_316 2008-04-08 11
+229 val_229 2008-04-08 11
+469 val_469 2008-04-08 11
+463 val_463 2008-04-08 11
+280 val_280 2008-04-08 11
+2 val_2 2008-04-08 11
+35 val_35 2008-04-08 11
+283 val_283 2008-04-08 11
+331 val_331 2008-04-08 11
+235 val_235 2008-04-08 11
+80 val_80 2008-04-08 11
+44 val_44 2008-04-08 11
+193 val_193 2008-04-08 11
+321 val_321 2008-04-08 11
+335 val_335 2008-04-08 11
+104 val_104 2008-04-08 11
+466 val_466 2008-04-08 11
+366 val_366 2008-04-08 11
+175 val_175 2008-04-08 11
+403 val_403 2008-04-08 11
+483 val_483 2008-04-08 11
+53 val_53 2008-04-08 11
+105 val_105 2008-04-08 11
+257 val_257 2008-04-08 11
+406 val_406 2008-04-08 11
+409 val_409 2008-04-08 11
+190 val_190 2008-04-08 11
+406 val_406 2008-04-08 11
+401 val_401 2008-04-08 11
+114 val_114 2008-04-08 11
+258 val_258 2008-04-08 11
+90 val_90 2008-04-08 11
+203 val_203 2008-04-08 11
+262 val_262 2008-04-08 11
+348 val_348 2008-04-08 11
+424 val_424 2008-04-08 11
+12 val_12 2008-04-08 11
+396 val_396 2008-04-08 11
+201 val_201 2008-04-08 11
+217 val_217 2008-04-08 11
+164 val_164 2008-04-08 11
+431 val_431 2008-04-08 11
+454 val_454 2008-04-08 11
+478 val_478 2008-04-08 11
+298 val_298 2008-04-08 11
+125 val_125 2008-04-08 11
+431 val_431 2008-04-08 11
+164 val_164 2008-04-08 11
+424 val_424 2008-04-08 11
+187 val_187 2008-04-08 11
+382 val_382 2008-04-08 11
+5 val_5 2008-04-08 11
+70 val_70 2008-04-08 11
+397 val_397 2008-04-08 11
+480 val_480 2008-04-08 11
+291 val_291 2008-04-08 11
+24 val_24 2008-04-08 11
+351 val_351 2008-04-08 11
+255 val_255 2008-04-08 11
+104 val_104 2008-04-08 11
+70 val_70 2008-04-08 11
+163 val_163 2008-04-08 11
+438 val_438 2008-04-08 11
+119 val_119 2008-04-08 11
+414 val_414 2008-04-08 11
+200 val_200 2008-04-08 11
+491 val_491 2008-04-08 11
+237 val_237 2008-04-08 11
+439 val_439 2008-04-08 11
+360 val_360 2008-04-08 11
+248 val_248 2008-04-08 11
+479 val_479 2008-04-08 11
+305 val_305 2008-04-08 11
+417 val_417 2008-04-08 11
+199 val_199 2008-04-08 11
+444 val_444 2008-04-08 11
+120 val_120 2008-04-08 11
+429 val_429 2008-04-08 11
+169 val_169 2008-04-08 11
+443 val_443 2008-04-08 11
+323 val_323 2008-04-08 11
+325 val_325 2008-04-08 11
+277 val_277 2008-04-08 11
+230 val_230 2008-04-08 11
+478 val_478 2008-04-08 11
+178 val_178 2008-04-08 11
+468 val_468 2008-04-08 11
+310 val_310 2008-04-08 11
+317 val_317 2008-04-08 11
+333 val_333 2008-04-08 11
+493 val_493 2008-04-08 11
+460 val_460 2008-04-08 11
+207 val_207 2008-04-08 11
+249 val_249 2008-04-08 11
+265 val_265 2008-04-08 11
+480 val_480 2008-04-08 11
+83 val_83 2008-04-08 11
+136 val_136 2008-04-08 11
+353 val_353 2008-04-08 11
+172 val_172 2008-04-08 11
+214 val_214 2008-04-08 11
+462 val_462 2008-04-08 11
+233 val_233 2008-04-08 11
+406 val_406 2008-04-08 11
+133 val_133 2008-04-08 11
+175 val_175 2008-04-08 11
+189 val_189 2008-04-08 11
+454 val_454 2008-04-08 11
+375 val_375 2008-04-08 11
+401 val_401 2008-04-08 11
+421 val_421 2008-04-08 11
+407 val_407 2008-04-08 11
+384 val_384 2008-04-08 11
+256 val_256 2008-04-08 11
+26 val_26 2008-04-08 11
+134 val_134 2008-04-08 11
+67 val_67 2008-04-08 11
+384 val_384 2008-04-08 11
+379 val_379 2008-04-08 11
+18 val_18 2008-04-08 11
+462 val_462 2008-04-08 11
+492 val_492 2008-04-08 11
+100 val_100 2008-04-08 11
+298 val_298 2008-04-08 11
+9 val_9 2008-04-08 11
+341 val_341 2008-04-08 11
+498 val_498 2008-04-08 11
+146 val_146 2008-04-08 11
+458 val_458 2008-04-08 11
+362 val_362 2008-04-08 11
+186 val_186 2008-04-08 11
+285 val_285 2008-04-08 11
+348 val_348 2008-04-08 11
+167 val_167 2008-04-08 11
+18 val_18 2008-04-08 11
+273 val_273 2008-04-08 11
+183 val_183 2008-04-08 11
+281 val_281 2008-04-08 11
+344 val_344 2008-04-08 11
+97 val_97 2008-04-08 11
+469 val_469 2008-04-08 11
+315 val_315 2008-04-08 11
+84 val_84 2008-04-08 11
+28 val_28 2008-04-08 11
+37 val_37 2008-04-08 11
+448 val_448 2008-04-08 11
+152 val_152 2008-04-08 11
+348 val_348 2008-04-08 11
+307 val_307 2008-04-08 11
+194 val_194 2008-04-08 11
+414 val_414 2008-04-08 11
+477 val_477 2008-04-08 11
+222 val_222 2008-04-08 11
+126 val_126 2008-04-08 11
+90 val_90 2008-04-08 11
+169 val_169 2008-04-08 11
+403 val_403 2008-04-08 11
+400 val_400 2008-04-08 11
+200 val_200 2008-04-08 11
+97 val_97 2008-04-08 11
+PREHOOK: query: select count(1) from srcbucket
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from srcbucket
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket
+#### A masked pattern was here ####
+1000
[48/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_join30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_join30.q.out b/ql/src/test/results/clientpositive/llap/auto_join30.q.out
new file mode 100644
index 0000000..3af7e85
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_join30.q.out
@@ -0,0 +1,1361 @@
+PREHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 5 (BROADCAST_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 5 <- Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col2, _col3
+ input vertices:
+ 1 Reducer 5
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: sum(hash(_col2,_col3))
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 5
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+103231310608
+PREHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 5 (BROADCAST_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 5 <- Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col2, _col3
+ input vertices:
+ 1 Reducer 5
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: sum(hash(_col2,_col3))
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 5
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+103231310608
+PREHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 4 <- Map 3 (SIMPLE_EDGE), Reducer 2 (BROADCAST_EDGE)
+ Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reducer 4
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col2, _col3
+ input vertices:
+ 0 Reducer 2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: sum(hash(_col2,_col3))
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 5
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+103231310608
+PREHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 5 (BROADCAST_EDGE), Reducer 7 (BROADCAST_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 5 <- Map 4 (SIMPLE_EDGE)
+ Reducer 7 <- Map 6 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ 2 _col0 (type: string)
+ outputColumnNames: _col2, _col3
+ input vertices:
+ 1 Reducer 5
+ 2 Reducer 7
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: sum(hash(_col2,_col3))
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 5
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Reducer 7
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+348019368476
+PREHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+ Reducer 6 <- Map 5 (SIMPLE_EDGE)
+ Reducer 8 <- Map 7 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Map 7
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Left Outer Join0 to 2
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ 2 _col0 (type: string)
+ outputColumnNames: _col2, _col3
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(hash(_col2,_col3))
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 6
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Reducer 8
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+348019368476
+PREHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+ Reducer 6 <- Map 5 (SIMPLE_EDGE)
+ Reducer 8 <- Map 7 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Map 7
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ Left Outer Join0 to 2
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ 2 _col0 (type: string)
+ outputColumnNames: _col2, _col3
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(hash(_col2,_col3))
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 6
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Reducer 8
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+348019368476
+PREHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+ Reducer 6 <- Map 5 (SIMPLE_EDGE)
+ Reducer 8 <- Map 7 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Map 7
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ Right Outer Join0 to 2
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ 2 _col0 (type: string)
+ outputColumnNames: _col2, _col3
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(hash(_col2,_col3))
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 6
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Reducer 8
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+LEFT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+348019368476
+PREHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM
+(SELECT src.* FROM src sort by key) x
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+ Reducer 6 <- Map 5 (SIMPLE_EDGE)
+ Reducer 8 <- Map 7 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Map 7
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ Right Outer Join0 to 2
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ 2 _col0 (type: string)
+ outputColumnNames: _col2, _col3
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(hash(_col2,_col3))
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 6
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Reducer 8
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: FROM
+(SELECT src.* FROM src sort by key) x
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Y
+ON (x.key = Y.key)
+RIGHT OUTER JOIN
+(SELECT src.* FROM src sort by value) Z
+ON (x.key = Z.key)
+select sum(hash(Y.key,Y.value))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+348019368476
[35/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_limit.q.out b/ql/src/test/results/clientpositive/llap/cbo_limit.q.out
new file mode 100644
index 0000000..13df214
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_limit.q.out
@@ -0,0 +1,90 @@
+PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
+select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit
+select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+NULL NULL NULL
+PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+NULL NULL 1
+PREHOOK: query: select key from(select key from (select key from cbo_t1 limit 5)cbo_t2 limit 5)cbo_t3 limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key from(select key from (select key from cbo_t1 limit 5)cbo_t2 limit 5)cbo_t3 limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1
+1
+1
+1
+1
+PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1 order by c_int limit 5)cbo_t2 order by c_int limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1 order by c_int limit 5)cbo_t2 order by c_int limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+NULL NULL
+NULL NULL
+1 1
+1 1
+1 1
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 12 6
+1 2 6
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int, c desc limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int, c desc limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 12 6
+1 2 6
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_semijoin.q.out b/ql/src/test/results/clientpositive/llap/cbo_semijoin.q.out
new file mode 100644
index 0000000..bdd8125
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_semijoin.q.out
@@ -0,0 +1,440 @@
+PREHOOK: query: -- 12. SemiJoin
+select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 12. SemiJoin
+select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+PREHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+1.0 1 1
+PREHOOK: query: select * from (select cbo_t3.c_int, cbo_t1.c, b from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 = 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select cbo_t3.c_int, cbo_t1.c, b from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 = 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+1 1.0 1
+PREHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+PREHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select c_int, b, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+1 1 1.0
+PREHOOK: query: select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 2 1
+ 1 2 1
+1 2 1
+1 12 1
+PREHOOK: query: select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 2 1
+ 1 2 1
+1 2 1
+1 12 1
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_simple_select.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_simple_select.q.out b/ql/src/test/results/clientpositive/llap/cbo_simple_select.q.out
new file mode 100644
index 0000000..d161d9f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_simple_select.q.out
@@ -0,0 +1,755 @@
+PREHOOK: query: -- 1. Test Select + TS
+select * from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 1. Test Select + TS
+select * from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+NULL NULL NULL NULL NULL 2014
+NULL NULL NULL NULL NULL 2014
+PREHOOK: query: select * from cbo_t1 as cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from cbo_t1 as cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+NULL NULL NULL NULL NULL 2014
+NULL NULL NULL NULL NULL 2014
+PREHOOK: query: select * from cbo_t1 as cbo_t2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from cbo_t1 as cbo_t2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+NULL NULL NULL NULL NULL 2014
+NULL NULL NULL NULL NULL 2014
+PREHOOK: query: select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+NULL NULL NULL
+NULL NULL NULL
+PREHOOK: query: select * from cbo_t1 where (((key=1) and (c_float=10)) and (c_int=20))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from cbo_t1 where (((key=1) and (c_float=10)) and (c_int=20))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+PREHOOK: query: -- 2. Test Select + TS + FIL
+select * from cbo_t1 where cbo_t1.c_int >= 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 2. Test Select + TS + FIL
+select * from cbo_t1 where cbo_t1.c_int >= 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+PREHOOK: query: -- 3 Test Select + Select + TS + FIL
+select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 3 Test Select + Select + TS + FIL
+select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+PREHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+PREHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+PREHOOK: query: select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+PREHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+PREHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select cbo_t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 as cbo_t2 where cbo_t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as cbo_t1 where cbo_t1.c_int >= 0 and y+c_int >= 0 or x <= 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+ 1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+1 1 25.0
+PREHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 where cbo_t1.c_int >= 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+PREHOOK: query: select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t2 where cbo_t2.c_int >= 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+2.0 1 25.0
+PREHOOK: query: -- 13. null expr in select list
+select null from cbo_t3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: -- 13. null expr in select list
+select null from cbo_t3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+PREHOOK: query: -- 14. unary operator
+select key from cbo_t1 where c_int = -6 or c_int = +6
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 14. unary operator
+select key from cbo_t1 where c_int = -6 or c_int = +6
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+PREHOOK: query: -- 15. query referencing only partition columns
+select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 15. query referencing only partition columns
+select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+400
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_stats.q.out b/ql/src/test/results/clientpositive/llap/cbo_stats.q.out
new file mode 100644
index 0000000..554a8f0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_stats.q.out
@@ -0,0 +1,14 @@
+PREHOOK: query: -- 20. Test get stats with empty partition list
+select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 20. Test get stats with empty partition list
+select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_subq_exists.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_subq_exists.q.out b/ql/src/test/results/clientpositive/llap/cbo_subq_exists.q.out
new file mode 100644
index 0000000..50bfbe2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_subq_exists.q.out
@@ -0,0 +1,297 @@
+PREHOOK: query: -- 18. SubQueries Not Exists
+-- distinct, corr
+select *
+from src_cbo b
+where not exists
+ (select distinct a.key
+ from src_cbo a
+ where b.value = a.value and a.value > 'val_2'
+ )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- 18. SubQueries Not Exists
+-- distinct, corr
+select *
+from src_cbo b
+where not exists
+ (select distinct a.key
+ from src_cbo a
+ where b.value = a.value and a.value > 'val_2'
+ )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+118 val_118
+119 val_119
+119 val_119
+119 val_119
+12 val_12
+12 val_12
+120 val_120
+120 val_120
+125 val_125
+125 val_125
+126 val_126
+128 val_128
+128 val_128
+128 val_128
+129 val_129
+129 val_129
+131 val_131
+133 val_133
+134 val_134
+134 val_134
+136 val_136
+137 val_137
+137 val_137
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+143 val_143
+145 val_145
+146 val_146
+146 val_146
+149 val_149
+149 val_149
+15 val_15
+15 val_15
+150 val_150
+152 val_152
+152 val_152
+153 val_153
+155 val_155
+156 val_156
+157 val_157
+158 val_158
+160 val_160
+162 val_162
+163 val_163
+164 val_164
+164 val_164
+165 val_165
+165 val_165
+166 val_166
+167 val_167
+167 val_167
+167 val_167
+168 val_168
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+17 val_17
+170 val_170
+172 val_172
+172 val_172
+174 val_174
+174 val_174
+175 val_175
+175 val_175
+176 val_176
+176 val_176
+177 val_177
+178 val_178
+179 val_179
+179 val_179
+18 val_18
+18 val_18
+180 val_180
+181 val_181
+183 val_183
+186 val_186
+187 val_187
+187 val_187
+187 val_187
+189 val_189
+19 val_19
+190 val_190
+191 val_191
+191 val_191
+192 val_192
+193 val_193
+193 val_193
+193 val_193
+194 val_194
+195 val_195
+195 val_195
+196 val_196
+197 val_197
+197 val_197
+199 val_199
+199 val_199
+199 val_199
+2 val_2
+PREHOOK: query: -- no agg, corr, having
+select *
+from src_cbo b
+group by key, value
+having not exists
+ (select a.key
+ from src_cbo a
+ where b.value = a.value and a.key = b.key and a.value > 'val_12'
+ )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- no agg, corr, having
+select *
+from src_cbo b
+group by key, value
+having not exists
+ (select a.key
+ from src_cbo a
+ where b.value = a.value and a.key = b.key and a.value > 'val_12'
+ )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+0 val_0
+10 val_10
+100 val_100
+103 val_103
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+119 val_119
+12 val_12
+PREHOOK: query: -- 19. SubQueries Exists
+-- view test
+create view cv1 as
+select *
+from src_cbo b
+where exists
+ (select a.key
+ from src_cbo a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9')
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_cbo
+PREHOOK: Output: database:default
+PREHOOK: Output: default@cv1
+POSTHOOK: query: -- 19. SubQueries Exists
+-- view test
+create view cv1 as
+select *
+from src_cbo b
+where exists
+ (select a.key
+ from src_cbo a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9')
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_cbo
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@cv1
+PREHOOK: query: select * from cv1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cv1
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: select * from cv1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cv1
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+90 val_90
+90 val_90
+90 val_90
+92 val_92
+95 val_95
+95 val_95
+96 val_96
+97 val_97
+97 val_97
+98 val_98
+98 val_98
+PREHOOK: query: -- sq in from
+select *
+from (select *
+ from src_cbo b
+ where exists
+ (select a.key
+ from src_cbo a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9')
+ ) a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- sq in from
+select *
+from (select *
+ from src_cbo b
+ where exists
+ (select a.key
+ from src_cbo a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9')
+ ) a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+90 val_90
+90 val_90
+90 val_90
+92 val_92
+95 val_95
+95 val_95
+96 val_96
+97 val_97
+97 val_97
+98 val_98
+98 val_98
+PREHOOK: query: -- sq in from, having
+select *
+from (select b.key, count(*)
+ from src_cbo b
+ group by b.key
+ having exists
+ (select a.key
+ from src_cbo a
+ where a.key = b.key and a.value > 'val_9'
+ )
+) a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- sq in from, having
+select *
+from (select b.key, count(*)
+ from src_cbo b
+ group by b.key
+ having exists
+ (select a.key
+ from src_cbo a
+ where a.key = b.key and a.value > 'val_9'
+ )
+) a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+90 3
+92 1
+95 2
+96 1
+97 2
+98 2
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_subq_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_subq_in.q.out b/ql/src/test/results/clientpositive/llap/cbo_subq_in.q.out
new file mode 100644
index 0000000..f6bfad2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_subq_in.q.out
@@ -0,0 +1,151 @@
+PREHOOK: query: -- 17. SubQueries In
+-- non agg, non corr
+select *
+from src_cbo
+where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- 17. SubQueries In
+-- non agg, non corr
+select *
+from src_cbo
+where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+90 val_90
+90 val_90
+90 val_90
+92 val_92
+95 val_95
+95 val_95
+96 val_96
+97 val_97
+97 val_97
+98 val_98
+98 val_98
+PREHOOK: query: -- agg, corr
+-- add back once rank issue fixed for cbo
+
+-- distinct, corr
+select *
+from src_cbo b
+where b.key in
+ (select distinct a.key
+ from src_cbo a
+ where b.value = a.value and a.key > '9'
+ ) order by b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- agg, corr
+-- add back once rank issue fixed for cbo
+
+-- distinct, corr
+select *
+from src_cbo b
+where b.key in
+ (select distinct a.key
+ from src_cbo a
+ where b.value = a.value and a.key > '9'
+ ) order by b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+90 val_90
+90 val_90
+90 val_90
+92 val_92
+95 val_95
+95 val_95
+96 val_96
+97 val_97
+97 val_97
+98 val_98
+98 val_98
+PREHOOK: query: -- non agg, corr, with join in Parent Query
+select p.p_partkey, li.l_suppkey
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey
+where li.l_linenumber = 1 and
+ li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
+ order by p.p_partkey
+PREHOOK: type: QUERY
+PREHOOK: Input: default@lineitem
+#### A masked pattern was here ####
+POSTHOOK: query: -- non agg, corr, with join in Parent Query
+select p.p_partkey, li.l_suppkey
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey
+where li.l_linenumber = 1 and
+ li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
+ order by p.p_partkey
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@lineitem
+#### A masked pattern was here ####
+4297 1798
+108570 8571
+PREHOOK: query: -- where and having
+-- Plan is:
+-- Stage 1: b semijoin sq1:src_cbo (subquery in where)
+-- Stage 2: group by Stage 1 o/p
+-- Stage 5: group by on sq2:src_cbo (subquery in having)
+-- Stage 6: Stage 2 o/p semijoin Stage 5
+select key, value, count(*)
+from src_cbo b
+where b.key in (select key from src_cbo where src_cbo.key > '8')
+group by key, value
+having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- where and having
+-- Plan is:
+-- Stage 1: b semijoin sq1:src_cbo (subquery in where)
+-- Stage 2: group by Stage 1 o/p
+-- Stage 5: group by on sq2:src_cbo (subquery in having)
+-- Stage 6: Stage 2 o/p semijoin Stage 5
+select key, value, count(*)
+from src_cbo b
+where b.key in (select key from src_cbo where src_cbo.key > '8')
+group by key, value
+having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+80 val_80 1
+82 val_82 1
+83 val_83 2
+84 val_84 2
+85 val_85 1
+86 val_86 1
+87 val_87 1
+9 val_9 1
+90 val_90 3
+92 val_92 1
+95 val_95 2
+96 val_96 1
+97 val_97 2
+98 val_98 2
+PREHOOK: query: -- non agg, non corr, windowing
+select p_mfgr, p_name, avg(p_size)
+from part
+group by p_mfgr, p_name
+having p_name in
+ (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) order by p_mfgr
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- non agg, non corr, windowing
+select p_mfgr, p_name, avg(p_size)
+from part
+group by p_mfgr, p_name
+having p_name in
+ (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) order by p_mfgr
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2.0
+Manufacturer#2 almond aquamarine midnight light salmon 2.0
+Manufacturer#3 almond antique misty red olive 1.0
+Manufacturer#4 almond aquamarine yellow dodger mint 7.0
+Manufacturer#5 almond antique sky peru orange 2.0
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_subq_not_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_subq_not_in.q.out b/ql/src/test/results/clientpositive/llap/cbo_subq_not_in.q.out
new file mode 100644
index 0000000..c7274f7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_subq_not_in.q.out
@@ -0,0 +1,365 @@
+PREHOOK: query: -- 16. SubQueries Not In
+-- non agg, non corr
+select *
+from src_cbo
+where src_cbo.key not in
+ ( select key from src_cbo s1
+ where s1.key > '2'
+ ) order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+POSTHOOK: query: -- 16. SubQueries Not In
+-- non agg, non corr
+select *
+from src_cbo
+where src_cbo.key not in
+ ( select key from src_cbo s1
+ where s1.key > '2'
+ ) order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_cbo
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+118 val_118
+119 val_119
+119 val_119
+119 val_119
+12 val_12
+12 val_12
+120 val_120
+120 val_120
+125 val_125
+125 val_125
+126 val_126
+128 val_128
+128 val_128
+128 val_128
+129 val_129
+129 val_129
+131 val_131
+133 val_133
+134 val_134
+134 val_134
+136 val_136
+137 val_137
+137 val_137
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+143 val_143
+145 val_145
+146 val_146
+146 val_146
+149 val_149
+149 val_149
+15 val_15
+15 val_15
+150 val_150
+152 val_152
+152 val_152
+153 val_153
+155 val_155
+156 val_156
+157 val_157
+158 val_158
+160 val_160
+162 val_162
+163 val_163
+164 val_164
+164 val_164
+165 val_165
+165 val_165
+166 val_166
+167 val_167
+167 val_167
+167 val_167
+168 val_168
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+17 val_17
+170 val_170
+172 val_172
+172 val_172
+174 val_174
+174 val_174
+175 val_175
+175 val_175
+176 val_176
+176 val_176
+177 val_177
+178 val_178
+179 val_179
+179 val_179
+18 val_18
+18 val_18
+180 val_180
+181 val_181
+183 val_183
+186 val_186
+187 val_187
+187 val_187
+187 val_187
+189 val_189
+19 val_19
+190 val_190
+191 val_191
+191 val_191
+192 val_192
+193 val_193
+193 val_193
+193 val_193
+194 val_194
+195 val_195
+195 val_195
+196 val_196
+197 val_197
+197 val_197
+199 val_199
+199 val_199
+199 val_199
+2 val_2
+PREHOOK: query: -- non agg, corr
+select p_mfgr, b.p_name, p_size
+from part b
+where b.p_name not in
+ (select p_name
+ from (select p_mfgr, p_name, p_size as r from part) a
+ where r < 10 and b.p_mfgr = a.p_mfgr
+ ) order by p_mfgr,p_size
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- non agg, corr
+select p_mfgr, b.p_name, p_size
+from part b
+where b.p_name not in
+ (select p_name
+ from (select p_mfgr, p_name, p_size as r from part) a
+ where r < 10 and b.p_mfgr = a.p_mfgr
+ ) order by p_mfgr,p_size
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond aquamarine burnished black steel 28
+Manufacturer#1 almond antique chartreuse lavender yellow 34
+Manufacturer#1 almond aquamarine pink moccasin thistle 42
+Manufacturer#2 almond antique violet chocolate turquoise 14
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18
+Manufacturer#2 almond aquamarine rose maroon antique 25
+Manufacturer#2 almond antique violet turquoise frosted 40
+Manufacturer#3 almond antique forest lavender goldenrod 14
+Manufacturer#3 almond antique chartreuse khaki white 17
+Manufacturer#3 almond antique metallic orange dim 19
+Manufacturer#3 almond antique olive coral navajo 45
+Manufacturer#4 almond antique gainsboro frosted violet 10
+Manufacturer#4 almond azure aquamarine papaya violet 12
+Manufacturer#4 almond aquamarine floral ivory bisque 27
+Manufacturer#4 almond antique violet mint lemon 39
+Manufacturer#5 almond azure blanched chiffon midnight 23
+Manufacturer#5 almond antique blue firebrick mint 31
+Manufacturer#5 almond aquamarine dodger light gainsboro 46
+PREHOOK: query: -- agg, non corr
+select p_name, p_size
+from
+part where part.p_size not in
+ (select avg(p_size)
+ from (select p_size from part) a
+ where p_size < 10
+ ) order by p_name
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- agg, non corr
+select p_name, p_size
+from
+part where part.p_size not in
+ (select avg(p_size)
+ from (select p_size from part) a
+ where p_size < 10
+ ) order by p_name
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+almond antique blue firebrick mint 31
+almond antique burnished rose metallic 2
+almond antique burnished rose metallic 2
+almond antique chartreuse khaki white 17
+almond antique chartreuse lavender yellow 34
+almond antique forest lavender goldenrod 14
+almond antique gainsboro frosted violet 10
+almond antique medium spring khaki 6
+almond antique metallic orange dim 19
+almond antique misty red olive 1
+almond antique olive coral navajo 45
+almond antique salmon chartreuse burlywood 6
+almond antique sky peru orange 2
+almond antique violet chocolate turquoise 14
+almond antique violet mint lemon 39
+almond antique violet turquoise frosted 40
+almond aquamarine burnished black steel 28
+almond aquamarine dodger light gainsboro 46
+almond aquamarine floral ivory bisque 27
+almond aquamarine midnight light salmon 2
+almond aquamarine pink moccasin thistle 42
+almond aquamarine rose maroon antique 25
+almond aquamarine sandy cyan gainsboro 18
+almond aquamarine yellow dodger mint 7
+almond azure aquamarine papaya violet 12
+almond azure blanched chiffon midnight 23
+PREHOOK: query: -- agg, corr
+select p_mfgr, p_name, p_size
+from part b where b.p_size not in
+ (select min(p_size)
+ from (select p_mfgr, p_size from part) a
+ where p_size < 10 and b.p_mfgr = a.p_mfgr
+ ) order by p_name
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- agg, corr
+select p_mfgr, p_name, p_size
+from part b where b.p_size not in
+ (select min(p_size)
+ from (select p_mfgr, p_size from part) a
+ where p_size < 10 and b.p_mfgr = a.p_mfgr
+ ) order by p_name
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#5 almond antique blue firebrick mint 31
+Manufacturer#3 almond antique chartreuse khaki white 17
+Manufacturer#1 almond antique chartreuse lavender yellow 34
+Manufacturer#3 almond antique forest lavender goldenrod 14
+Manufacturer#4 almond antique gainsboro frosted violet 10
+Manufacturer#5 almond antique medium spring khaki 6
+Manufacturer#3 almond antique metallic orange dim 19
+Manufacturer#3 almond antique olive coral navajo 45
+Manufacturer#1 almond antique salmon chartreuse burlywood 6
+Manufacturer#2 almond antique violet chocolate turquoise 14
+Manufacturer#4 almond antique violet mint lemon 39
+Manufacturer#2 almond antique violet turquoise frosted 40
+Manufacturer#1 almond aquamarine burnished black steel 28
+Manufacturer#5 almond aquamarine dodger light gainsboro 46
+Manufacturer#4 almond aquamarine floral ivory bisque 27
+Manufacturer#1 almond aquamarine pink moccasin thistle 42
+Manufacturer#2 almond aquamarine rose maroon antique 25
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18
+Manufacturer#4 almond azure aquamarine papaya violet 12
+Manufacturer#5 almond azure blanched chiffon midnight 23
+PREHOOK: query: -- non agg, non corr, Group By in Parent Query
+select li.l_partkey, count(*)
+from lineitem li
+where li.l_linenumber = 1 and
+ li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR')
+group by li.l_partkey order by li.l_partkey
+PREHOOK: type: QUERY
+PREHOOK: Input: default@lineitem
+#### A masked pattern was here ####
+POSTHOOK: query: -- non agg, non corr, Group By in Parent Query
+select li.l_partkey, count(*)
+from lineitem li
+where li.l_linenumber = 1 and
+ li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR')
+group by li.l_partkey order by li.l_partkey
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@lineitem
+#### A masked pattern was here ####
+450 1
+7068 1
+21636 1
+22630 1
+59694 1
+61931 1
+85951 1
+88035 1
+88362 1
+106170 1
+119477 1
+119767 1
+123076 1
+139636 1
+175839 1
+182052 1
+PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved.
+
+-- non agg, corr, having
+select b.p_mfgr, min(p_retailprice)
+from part b
+group by b.p_mfgr
+having b.p_mfgr not in
+ (select p_mfgr
+ from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a
+ where min(p_retailprice) = l and r - l > 600
+ )
+ order by b.p_mfgr
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved.
+
+-- non agg, corr, having
+select b.p_mfgr, min(p_retailprice)
+from part b
+group by b.p_mfgr
+having b.p_mfgr not in
+ (select p_mfgr
+ from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a
+ where min(p_retailprice) = l and r - l > 600
+ )
+ order by b.p_mfgr
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 1173.15
+Manufacturer#2 1690.68
+PREHOOK: query: -- agg, non corr, having
+select b.p_mfgr, min(p_retailprice)
+from part b
+group by b.p_mfgr
+having b.p_mfgr not in
+ (select p_mfgr
+ from part a
+ group by p_mfgr
+ having max(p_retailprice) - min(p_retailprice) > 600
+ )
+ order by b.p_mfgr
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: -- agg, non corr, having
+select b.p_mfgr, min(p_retailprice)
+from part b
+group by b.p_mfgr
+having b.p_mfgr not in
+ (select p_mfgr
+ from part a
+ group by p_mfgr
+ having max(p_retailprice) - min(p_retailprice) > 600
+ )
+ order by b.p_mfgr
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 1173.15
+Manufacturer#2 1690.68
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_udf_udaf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_udf_udaf.q.out b/ql/src/test/results/clientpositive/llap/cbo_udf_udaf.q.out
new file mode 100644
index 0000000..156d02f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_udf_udaf.q.out
@@ -0,0 +1,125 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 8. Test UDF/UDAF
+select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 8. Test UDF/UDAF
+select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+20 18 18 1.0 1 1
+PREHOOK: query: select count(*), count(c_int) as a, sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*), count(c_int) as a, sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from cbo_t1 group by c_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18 18 18 1.0 1 1 2 36
+2 0 NULL NULL NULL NULL 3 6
+PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+20 1 18 1.0 1 1
+PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from cbo_t1 group by c_int) cbo_t1 order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18 1 18 1.0 1 1 2 36
+2 0 NULL NULL NULL NULL 3 6
+PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 20 1 18
+PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 20 1 1
+PREHOOK: query: select key,count(c_int) as a, avg(c_float) from cbo_t1 group by key order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key,count(c_int) as a, avg(c_float) from cbo_t1 group by key order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+ 1 2 1.0
+ 1 2 1.0
+1 12 1.0
+1 2 1.0
+NULL 0 NULL
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0 NULL
+1 1.0
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0 NULL
+1 1.0
+PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float, c_int order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0 NULL
+1 1.0
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_union.q.out b/ql/src/test/results/clientpositive/llap/cbo_union.q.out
new file mode 100644
index 0000000..fb86d22
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_union.q.out
@@ -0,0 +1,920 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 11. Union All
+select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 11. Union All
+select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+ 1 1 1 1.0 true 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 false 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+1 1 1 1.0 true 2014
+2 2 2 2.0 true 2014
+2 2 2 2.0 true 2014
+2 2 2 2.0 true 2014
+2 2 2 2.0 true 2014
+2 2 2 2.0 true 2014
+NULL NULL NULL NULL NULL 2014
+NULL NULL NULL NULL NULL 2014
+NULL NULL NULL NULL NULL 2014
+NULL NULL NULL NULL NULL 2014
+PREHOOK: query: select key from (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r2 where key >=0 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key from (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r2 where key >=0 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+2
+2
+2
+2
+2
+2
+2
+2
+3
+3
+3
+PREHOOK: query: select r2.key from (select key, c_int from (select key, c_int from cbo_t1 union all select key, c_int from cbo_t3 )r1 union all select key, c_int from cbo_t3)r2 join (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select r2.key from (select key, c_int from (select key, c_int from cbo_t1 union all select key, c_int from cbo_t3 )r1 union all select key, c_int from cbo_t3)r2 join (select key, c_int from (select * from cbo_t1 union all select * from cbo_t2 where cbo_t2.key >=0)r1 union all select key, c_int from cbo_t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+2
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
+3
[45/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out
new file mode 100644
index 0000000..377e4b5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out
@@ -0,0 +1,645 @@
+PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket
+
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket
+
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: CREATE TABLE bucket_medium (key string, value string) partitioned by (ds string)
+CLUSTERED BY (key) SORTED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_medium
+POSTHOOK: query: CREATE TABLE bucket_medium (key string, value string) partitioned by (ds string)
+CLUSTERED BY (key) SORTED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_medium
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_medium
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_medium
+POSTHOOK: Output: default@bucket_medium@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_medium@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_medium@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_medium@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_medium@ds=2008-04-08
+Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Map 3' is a cross product
+PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_JOIN
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_medium
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ c
+ =
+ .
+ TOK_TABLE_OR_COL
+ c
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_medium
+ d
+ =
+ .
+ TOK_TABLE_OR_COL
+ c
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Map 3 <- Map 2 (BROADCAST_EDGE), Map 5 (BROADCAST_EDGE)
+ Reducer 4 <- Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [a]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 1 Data size: 170 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 170 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 1 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ outputColumnNames: _col6
+ input vertices:
+ 0 Map 1
+ Position of Big Table: 1
+ Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Reduce Output Operator
+ key expressions: _col6 (type: string), _col6 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col6 (type: string), _col6 (type: string)
+ Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 3
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_medium
+ numFiles 3
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_medium { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 170
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 3
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_medium
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_medium { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_medium
+ name: default.bucket_medium
+ Truncated Path -> Alias:
+ /bucket_medium/ds=2008-04-08 [b]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 2 => 1
+ keys:
+ 0 _col6 (type: string), _col6 (type: string)
+ 1 key (type: string), key (type: string)
+ input vertices:
+ 0 Map 2
+ Position of Big Table: 1
+ Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 5 => 1
+ keys:
+ 0
+ 1
+ input vertices:
+ 1 Map 5
+ Position of Big Table: 0
+ Statistics: Num rows: 69 Data size: 7032 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [c]
+ /bucket_big/ds=2008-04-09 [c]
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: d
+ Statistics: Num rows: 1 Data size: 170 Basic stats: PARTIAL Column stats: NONE
+ GatherStats: false
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 170 Basic stats: PARTIAL Column stats: NONE
+ tag: 1
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 3
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_medium
+ numFiles 3
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_medium { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 170
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 3
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_medium
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_medium { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_medium
+ name: default.bucket_medium
+ Truncated Path -> Alias:
+ /bucket_medium/ds=2008-04-08 [d]
+ Reducer 4
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Map 3' is a cross product
+PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_medium
+PREHOOK: Input: default@bucket_medium@ds=2008-04-08
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_medium
+POSTHOOK: Input: default@bucket_medium@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+570
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out
new file mode 100644
index 0000000..639ea41
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out
@@ -0,0 +1,692 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl1
+PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl2
+PREHOOK: query: insert overwrite table tbl1 select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: insert overwrite table tbl1 select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl1
+POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tbl2 select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: insert overwrite table tbl2 select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl2
+POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: CREATE TABLE dest1(k1 int, k2 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(k1 int, k2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: CREATE TABLE dest2(k1 string, k2 string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest2
+POSTHOOK: query: CREATE TABLE dest2(k1 string, k2 string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest2
+PREHOOK: query: -- A SMB join followed by a mutli-insert
+explain
+from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- A SMB join followed by a mutli-insert
+explain
+from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-3 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-3
+ Stage-4 depends on stages: Stage-0
+ Stage-1 depends on stages: Stage-3
+ Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col2 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+ Select Operator
+ expressions: _col1 (type: string), _col3 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest2
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+
+ Stage: Stage-3
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-4
+ Stats-Aggr Operator
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest2
+
+ Stage: Stage-5
+ Stats-Aggr Operator
+
+PREHOOK: query: from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest2
+POSTHOOK: query: from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest2
+POSTHOOK: Lineage: dest1.k1 SIMPLE [(tbl1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest1.k2 SIMPLE [(tbl2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest2.k1 SIMPLE [(tbl1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2.k2 SIMPLE [(tbl2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+2 2
+4 4
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+8 8
+9 9
+PREHOOK: query: select * from dest2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest2
+#### A masked pattern was here ####
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_2 val_2
+val_4 val_4
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_8 val_8
+val_9 val_9
+PREHOOK: query: -- A SMB join followed by a mutli-insert
+explain
+from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- A SMB join followed by a mutli-insert
+explain
+from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-3 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-3
+ Stage-4 depends on stages: Stage-0
+ Stage-1 depends on stages: Stage-3
+ Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col2 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+ Select Operator
+ expressions: _col1 (type: string), _col3 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest2
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+
+ Stage: Stage-3
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-4
+ Stats-Aggr Operator
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest2
+
+ Stage: Stage-5
+ Stats-Aggr Operator
+
+PREHOOK: query: from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest2
+POSTHOOK: query: from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest2
+POSTHOOK: Lineage: dest1.k1 SIMPLE [(tbl1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest1.k2 SIMPLE [(tbl2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest2.k1 SIMPLE [(tbl1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2.k2 SIMPLE [(tbl2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+2 2
+4 4
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+8 8
+9 9
+PREHOOK: query: select * from dest2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest2
+#### A masked pattern was here ####
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_2 val_2
+val_4 val_4
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_8 val_8
+val_9 val_9
+PREHOOK: query: -- A SMB join followed by a mutli-insert
+explain
+from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- A SMB join followed by a mutli-insert
+explain
+from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-3 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-3
+ Stage-4 depends on stages: Stage-0
+ Stage-1 depends on stages: Stage-3
+ Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col2 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+ Select Operator
+ expressions: _col1 (type: string), _col3 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest2
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+
+ Stage: Stage-3
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-4
+ Stats-Aggr Operator
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest2
+
+ Stage: Stage-5
+ Stats-Aggr Operator
+
+PREHOOK: query: from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+PREHOOK: Output: default@dest1
+PREHOOK: Output: default@dest2
+POSTHOOK: query: from (
+ SELECT a.key key1, a.value value1, b.key key2, b.value value2
+ FROM tbl1 a JOIN tbl2 b
+ ON a.key = b.key ) subq
+INSERT OVERWRITE TABLE dest1 select key1, key2
+INSERT OVERWRITE TABLE dest2 select value1, value2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+POSTHOOK: Output: default@dest1
+POSTHOOK: Output: default@dest2
+POSTHOOK: Lineage: dest1.k1 SIMPLE [(tbl1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest1.k2 SIMPLE [(tbl2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: dest2.k1 SIMPLE [(tbl1)a.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: dest2.k2 SIMPLE [(tbl2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+0 0
+2 2
+4 4
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+8 8
+9 9
+PREHOOK: query: select * from dest2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest2
+#### A masked pattern was here ####
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_0 val_0
+val_2 val_2
+val_4 val_4
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_5 val_5
+val_8 val_8
+val_9 val_9
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_14.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_14.q.out
new file mode 100644
index 0000000..ad5a814
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_14.q.out
@@ -0,0 +1,224 @@
+PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl1
+PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl2
+PREHOOK: query: insert overwrite table tbl1 select * from src where key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: insert overwrite table tbl1 select * from src where key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl1
+POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tbl2 select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: insert overwrite table tbl2 select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl2
+POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed
+explain
+select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed
+explain
+select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+32
+PREHOOK: query: insert overwrite table tbl2 select * from src where key < 200
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: insert overwrite table tbl2 select * from src where key < 200
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl2
+POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed
+explain
+select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed
+explain
+select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 189 Data size: 1891 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 207 Data size: 2080 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+207
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_15.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_15.q.out
new file mode 100644
index 0000000..8c2e080
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_15.q.out
@@ -0,0 +1,188 @@
+PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl1
+PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl2
+PREHOOK: query: insert overwrite table tbl1 select * from src where key < 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: insert overwrite table tbl1 select * from src where key < 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl1
+POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tbl2 select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: insert overwrite table tbl2 select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl2
+POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_16.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_16.q.out
new file mode 100644
index 0000000..d4ecb19
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_16.q.out
@@ -0,0 +1,256 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE stage_bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (file_tag STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stage_bucket_big
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE stage_bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (file_tag STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stage_bucket_big
+PREHOOK: query: CREATE TABLE bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (day STRING, pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (day STRING, pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: CREATE TABLE stage_bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (file_tag STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stage_bucket_small
+POSTHOOK: query: CREATE TABLE stage_bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (file_tag STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stage_bucket_small
+PREHOOK: query: CREATE TABLE bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: CREATE TABLE bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='1')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@stage_bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='1')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@stage_bucket_small
+POSTHOOK: Output: default@stage_bucket_small@file_tag=1
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='2')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@stage_bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='2')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@stage_bucket_small
+POSTHOOK: Output: default@stage_bucket_small@file_tag=2
+PREHOOK: query: insert overwrite table bucket_small partition(pri)
+select
+key,
+value,
+file_tag as pri
+from
+stage_bucket_small
+where file_tag between 1 and 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stage_bucket_small
+PREHOOK: Input: default@stage_bucket_small@file_tag=1
+PREHOOK: Input: default@stage_bucket_small@file_tag=2
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: insert overwrite table bucket_small partition(pri)
+select
+key,
+value,
+file_tag as pri
+from
+stage_bucket_small
+where file_tag between 1 and 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stage_bucket_small
+POSTHOOK: Input: default@stage_bucket_small@file_tag=1
+POSTHOOK: Input: default@stage_bucket_small@file_tag=2
+POSTHOOK: Output: default@bucket_small@pri=1
+POSTHOOK: Output: default@bucket_small@pri=2
+POSTHOOK: Lineage: bucket_small PARTITION(pri=1).key SIMPLE [(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint, comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=1).value SIMPLE [(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=2).key SIMPLE [(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint, comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=2).value SIMPLE [(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' overwrite into table stage_bucket_big partition (file_tag='1')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@stage_bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' overwrite into table stage_bucket_big partition (file_tag='1')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@stage_bucket_big
+POSTHOOK: Output: default@stage_bucket_big@file_tag=1
+PREHOOK: query: insert overwrite table bucket_big partition(day,pri)
+select
+key,
+value,
+'day1' as day,
+1 as pri
+from
+stage_bucket_big
+where
+file_tag='1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stage_bucket_big
+PREHOOK: Input: default@stage_bucket_big@file_tag=1
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: insert overwrite table bucket_big partition(day,pri)
+select
+key,
+value,
+'day1' as day,
+1 as pri
+from
+stage_bucket_big
+where
+file_tag='1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stage_bucket_big
+POSTHOOK: Input: default@stage_bucket_big@file_tag=1
+POSTHOOK: Output: default@bucket_big@day=day1/pri=1
+POSTHOOK: Lineage: bucket_big PARTITION(day=day1,pri=1).key SIMPLE [(stage_bucket_big)stage_bucket_big.FieldSchema(name:key, type:bigint, comment:null), ]
+POSTHOOK: Lineage: bucket_big PARTITION(day=day1,pri=1).value SIMPLE [(stage_bucket_big)stage_bucket_big.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select
+a.key ,
+a.value ,
+b.value ,
+'day1' as day,
+1 as pri
+from
+(
+select
+key,
+value
+from bucket_big where day='day1'
+) a
+left outer join
+(
+select
+key,
+value
+from bucket_small
+where pri between 1 and 2
+) b
+on
+(a.key = b.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@day=day1/pri=1
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@pri=1
+PREHOOK: Input: default@bucket_small@pri=2
+#### A masked pattern was here ####
+POSTHOOK: query: select
+a.key ,
+a.value ,
+b.value ,
+'day1' as day,
+1 as pri
+from
+(
+select
+key,
+value
+from bucket_big where day='day1'
+) a
+left outer join
+(
+select
+key,
+value
+from bucket_small
+where pri between 1 and 2
+) b
+on
+(a.key = b.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@day=day1/pri=1
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@pri=1
+POSTHOOK: Input: default@bucket_small@pri=2
+#### A masked pattern was here ####
+0 val_0 val_0 day1 1
+0 val_0 val_0 day1 1
+0 val_0 val_0 day1 1
+0 val_0 val_0 day1 1
+0 val_0 val_0 day1 1
+0 val_0 val_0 day1 1
+103 val_103 val_103 day1 1
+103 val_103 val_103 day1 1
+103 val_103 val_103 day1 1
+103 val_103 val_103 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+172 val_172 val_172 day1 1
+172 val_172 val_172 day1 1
+172 val_172 val_172 day1 1
+172 val_172 val_172 day1 1
+374 val_374 val_374 day1 1
+374 val_374 val_374 day1 1
[25/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_2.q.out b/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
new file mode 100644
index 0000000..8156789
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/explainuser_2.q.out
@@ -0,0 +1,5519 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_j1
+PREHOOK: query: CREATE TABLE ss(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ss
+POSTHOOK: query: CREATE TABLE ss(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ss
+PREHOOK: query: CREATE TABLE sr(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@sr
+POSTHOOK: query: CREATE TABLE sr(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@sr
+PREHOOK: query: CREATE TABLE cs(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@cs
+POSTHOOK: query: CREATE TABLE cs(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@cs
+PREHOOK: query: INSERT OVERWRITE TABLE ss
+SELECT x.key,x.value,y.key,y.value,z.key,z.value
+FROM src1 x
+JOIN src y ON (x.key = y.key)
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Output: default@ss
+POSTHOOK: query: INSERT OVERWRITE TABLE ss
+SELECT x.key,x.value,y.key,y.value,z.key,z.value
+FROM src1 x
+JOIN src y ON (x.key = y.key)
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@ss
+POSTHOOK: Lineage: ss.k1 SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.k2 SIMPLE [(src)y.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.k3 SIMPLE [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.v1 SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.v2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.v3 SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE sr
+SELECT x.key,x.value,y.key,y.value,z.key,z.value
+FROM src1 x
+JOIN src y ON (x.key = y.key)
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=12)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@sr
+POSTHOOK: query: INSERT OVERWRITE TABLE sr
+SELECT x.key,x.value,y.key,y.value,z.key,z.value
+FROM src1 x
+JOIN src y ON (x.key = y.key)
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=12)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@sr
+POSTHOOK: Lineage: sr.k1 SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.k2 SIMPLE [(src)y.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.k3 SIMPLE [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.v1 SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.v2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.v3 SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE cs
+SELECT x.key,x.value,y.key,y.value,z.key,z.value
+FROM src1 x
+JOIN src y ON (x.key = y.key)
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@cs
+POSTHOOK: query: INSERT OVERWRITE TABLE cs
+SELECT x.key,x.value,y.key,y.value,z.key,z.value
+FROM src1 x
+JOIN src y ON (x.key = y.key)
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@cs
+POSTHOOK: Lineage: cs.k1 SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: cs.k2 SIMPLE [(src)y.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: cs.k3 SIMPLE [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: cs.v1 SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: cs.v2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: cs.v3 SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: ANALYZE TABLE ss COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss
+PREHOOK: Output: default@ss
+POSTHOOK: query: ANALYZE TABLE ss COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss
+POSTHOOK: Output: default@ss
+PREHOOK: query: ANALYZE TABLE ss COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE ss COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE sr COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sr
+PREHOOK: Output: default@sr
+POSTHOOK: query: ANALYZE TABLE sr COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sr
+POSTHOOK: Output: default@sr
+PREHOOK: query: ANALYZE TABLE sr COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sr
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE sr COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sr
+#### A masked pattern was here ####
+PREHOOK: query: ANALYZE TABLE cs COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cs
+PREHOOK: Output: default@cs
+POSTHOOK: query: ANALYZE TABLE cs COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cs
+POSTHOOK: Output: default@cs
+PREHOOK: query: ANALYZE TABLE cs COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cs
+#### A masked pattern was here ####
+POSTHOOK: query: ANALYZE TABLE cs COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cs
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN
+SELECT x.key, z.value, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT x.key, z.value, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11)
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+
+Stage-0
+ Fetch Operator
+ limit:-1
+ Stage-1
+ Reducer 3
+ File Output Operator [FS_18]
+ compressed:false
+ Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
+ Select Operator [SEL_17]
+ outputColumnNames:["_col0","_col1","_col2"]
+ Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ Merge Join Operator [MERGEJOIN_28]
+ | condition map:[{"":"Inner Join 0 to 1"}]
+ | keys:{"0":"_col3 (type: string)","1":"_col0 (type: string)"}
+ | outputColumnNames:["_col0","_col3","_col6"]
+ | Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ |<-Map 5 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_15]
+ | key expressions:_col0 (type: string)
+ | Map-reduce partition columns:_col0 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | value expressions:_col1 (type: string)
+ | Select Operator [SEL_6]
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | Filter Operator [FIL_26]
+ | predicate:key is not null (type: boolean)
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | TableScan [TS_5]
+ | alias:y
+ | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ |<-Reducer 2 [SIMPLE_EDGE]
+ Reduce Output Operator [RS_13]
+ key expressions:_col3 (type: string)
+ Map-reduce partition columns:_col3 (type: string)
+ sort order:+
+ Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ value expressions:_col0 (type: string)
+ Merge Join Operator [MERGEJOIN_27]
+ | condition map:[{"":"Inner Join 0 to 1"}]
+ | keys:{"0":"_col0 (type: string)","1":"_col1 (type: string)"}
+ | outputColumnNames:["_col0","_col3"]
+ | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ |<-Map 1 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_8]
+ | key expressions:_col0 (type: string)
+ | Map-reduce partition columns:_col0 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_2]
+ | outputColumnNames:["_col0"]
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | Filter Operator [FIL_24]
+ | predicate:value is not null (type: boolean)
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | TableScan [TS_0]
+ | alias:z
+ | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ |<-Map 4 [SIMPLE_EDGE]
+ Reduce Output Operator [RS_10]
+ key expressions:_col1 (type: string)
+ Map-reduce partition columns:_col1 (type: string)
+ sort order:+
+ Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ value expressions:_col0 (type: string)
+ Select Operator [SEL_4]
+ outputColumnNames:["_col0","_col1"]
+ Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator [FIL_25]
+ predicate:(value is not null and key is not null) (type: boolean)
+ Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ TableScan [TS_3]
+ alias:x
+ Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+
+PREHOOK: query: EXPLAIN
+select
+ss.k1,sr.k2,cs.k3,count(ss.v1),count(sr.v2),count(cs.v3)
+FROM
+ss,sr,cs,src d1,src d2,src d3,src1,srcpart
+where
+ ss.k1 = d1.key
+and sr.k1 = d2.key
+and cs.k1 = d3.key
+and ss.k2 = sr.k2
+and ss.k3 = sr.k3
+and ss.v1 = src1.value
+and ss.v2 = srcpart.value
+and sr.v2 = cs.v2
+and sr.v3 = cs.v3
+and ss.v3='ssv3'
+and sr.v1='srv1'
+and src1.key = 'src1key'
+and srcpart.key = 'srcpartkey'
+and d1.value = 'd1value'
+and d2.value in ('2000Q1','2000Q2','2000Q3')
+and d3.value in ('2000Q1','2000Q2','2000Q3')
+group by
+ss.k1,sr.k2,cs.k3
+order by
+ss.k1,sr.k2,cs.k3
+limit 100
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+select
+ss.k1,sr.k2,cs.k3,count(ss.v1),count(sr.v2),count(cs.v3)
+FROM
+ss,sr,cs,src d1,src d2,src d3,src1,srcpart
+where
+ ss.k1 = d1.key
+and sr.k1 = d2.key
+and cs.k1 = d3.key
+and ss.k2 = sr.k2
+and ss.k3 = sr.k3
+and ss.v1 = src1.value
+and ss.v2 = srcpart.value
+and sr.v2 = cs.v2
+and sr.v3 = cs.v3
+and ss.v3='ssv3'
+and sr.v1='srv1'
+and src1.key = 'src1key'
+and srcpart.key = 'srcpartkey'
+and d1.value = 'd1value'
+and d2.value in ('2000Q1','2000Q2','2000Q3')
+and d3.value in ('2000Q1','2000Q2','2000Q3')
+group by
+ss.k1,sr.k2,cs.k3
+order by
+ss.k1,sr.k2,cs.k3
+limit 100
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 10 <- Map 14 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE)
+Reducer 11 <- Reducer 10 (SIMPLE_EDGE), Reducer 16 (SIMPLE_EDGE)
+Reducer 16 <- Map 15 (SIMPLE_EDGE), Map 17 (SIMPLE_EDGE)
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 11 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
+Reducer 8 <- Map 12 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+Reducer 9 <- Map 13 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+
+Stage-0
+ Fetch Operator
+ limit:100
+ Stage-1
+ Reducer 5
+ File Output Operator [FS_69]
+ compressed:false
+ Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
+ Limit [LIM_68]
+ Number of rows:100
+ Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator [SEL_67]
+ | outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
+ | Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+ |<-Reducer 4 [SIMPLE_EDGE]
+ Reduce Output Operator [RS_66]
+ key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
+ sort order:+++
+ Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+ value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
+ Group By Operator [GBY_64]
+ | aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"]
+ | keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
+ | outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
+ | Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+ |<-Reducer 3 [SIMPLE_EDGE]
+ Reduce Output Operator [RS_63]
+ key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
+ Map-reduce partition columns:_col0 (type: string), _col1 (type: string), _col2 (type: string)
+ sort order:+++
+ Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+ value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
+ Group By Operator [GBY_62]
+ aggregations:["count(_col3)","count(_col4)","count(_col5)"]
+ keys:_col0 (type: string), _col1 (type: string), _col2 (type: string)
+ outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
+ Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+ Select Operator [SEL_60]
+ outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
+ Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+ Merge Join Operator [MERGEJOIN_111]
+ | condition map:[{"":"Inner Join 0 to 1"}]
+ | keys:{"0":"_col1 (type: string), _col3 (type: string)","1":"_col15 (type: string), _col17 (type: string)"}
+ | outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
+ | Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+ |<-Reducer 11 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_58]
+ | key expressions:_col15 (type: string), _col17 (type: string)
+ | Map-reduce partition columns:_col15 (type: string), _col17 (type: string)
+ | sort order:++
+ | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+ | value expressions:_col6 (type: string), _col7 (type: string), _col14 (type: string)
+ | Select Operator [SEL_49]
+ | outputColumnNames:["_col14","_col15","_col17","_col6","_col7"]
+ | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+ | Merge Join Operator [MERGEJOIN_110]
+ | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | keys:{"0":"_col4 (type: string), _col6 (type: string)","1":"_col2 (type: string), _col4 (type: string)"}
+ | | outputColumnNames:["_col2","_col3","_col14","_col15","_col17"]
+ | | Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+ | |<-Reducer 10 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_45]
+ | | key expressions:_col4 (type: string), _col6 (type: string)
+ | | Map-reduce partition columns:_col4 (type: string), _col6 (type: string)
+ | | sort order:++
+ | | Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+ | | value expressions:_col2 (type: string), _col3 (type: string)
+ | | Merge Join Operator [MERGEJOIN_108]
+ | | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | | keys:{"0":"_col3 (type: string)","1":"_col1 (type: string)"}
+ | | | outputColumnNames:["_col2","_col3","_col4","_col6"]
+ | | | Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+ | | |<-Map 14 [SIMPLE_EDGE]
+ | | | Reduce Output Operator [RS_42]
+ | | | key expressions:_col1 (type: string)
+ | | | Map-reduce partition columns:_col1 (type: string)
+ | | | sort order:+
+ | | | Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+ | | | Select Operator [SEL_16]
+ | | | outputColumnNames:["_col1"]
+ | | | Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+ | | | Filter Operator [FIL_102]
+ | | | predicate:((key = 'src1key') and value is not null) (type: boolean)
+ | | | Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+ | | | TableScan [TS_14]
+ | | | alias:src1
+ | | | Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ | | |<-Reducer 9 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_40]
+ | | key expressions:_col3 (type: string)
+ | | Map-reduce partition columns:_col3 (type: string)
+ | | sort order:+
+ | | Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+ | | value expressions:_col2 (type: string), _col4 (type: string), _col6 (type: string)
+ | | Merge Join Operator [MERGEJOIN_107]
+ | | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | | keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
+ | | | outputColumnNames:["_col2","_col3","_col4","_col6"]
+ | | | Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+ | | |<-Map 13 [SIMPLE_EDGE]
+ | | | Reduce Output Operator [RS_37]
+ | | | key expressions:_col0 (type: string)
+ | | | Map-reduce partition columns:_col0 (type: string)
+ | | | sort order:+
+ | | | Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ | | | Select Operator [SEL_13]
+ | | | outputColumnNames:["_col0"]
+ | | | Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ | | | Filter Operator [FIL_101]
+ | | | predicate:((value = 'd1value') and key is not null) (type: boolean)
+ | | | Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ | | | TableScan [TS_11]
+ | | | alias:d1
+ | | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ | | |<-Reducer 8 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_35]
+ | | key expressions:_col2 (type: string)
+ | | Map-reduce partition columns:_col2 (type: string)
+ | | sort order:+
+ | | Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ | | value expressions:_col3 (type: string), _col4 (type: string), _col6 (type: string)
+ | | Merge Join Operator [MERGEJOIN_106]
+ | | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | | keys:{"0":"_col1 (type: string)","1":"_col3 (type: string)"}
+ | | | outputColumnNames:["_col2","_col3","_col4","_col6"]
+ | | | Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ | | |<-Map 12 [SIMPLE_EDGE]
+ | | | Reduce Output Operator [RS_32]
+ | | | key expressions:_col3 (type: string)
+ | | | Map-reduce partition columns:_col3 (type: string)
+ | | | sort order:+
+ | | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+ | | | value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
+ | | | Select Operator [SEL_10]
+ | | | outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
+ | | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+ | | | Filter Operator [FIL_100]
+ | | | predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
+ | | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+ | | | TableScan [TS_8]
+ | | | alias:ss
+ | | | Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+ | | |<-Map 7 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_30]
+ | | key expressions:_col1 (type: string)
+ | | Map-reduce partition columns:_col1 (type: string)
+ | | sort order:+
+ | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ | | Select Operator [SEL_7]
+ | | outputColumnNames:["_col1"]
+ | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_99]
+ | | predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
+ | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_5]
+ | | alias:srcpart
+ | | Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ | |<-Reducer 16 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_47]
+ | key expressions:_col2 (type: string), _col4 (type: string)
+ | Map-reduce partition columns:_col2 (type: string), _col4 (type: string)
+ | sort order:++
+ | Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+ | value expressions:_col3 (type: string), _col5 (type: string)
+ | Merge Join Operator [MERGEJOIN_109]
+ | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
+ | | outputColumnNames:["_col2","_col3","_col4","_col5"]
+ | | Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+ | |<-Map 15 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_24]
+ | | key expressions:_col0 (type: string)
+ | | Map-reduce partition columns:_col0 (type: string)
+ | | sort order:+
+ | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+ | | value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+ | | Select Operator [SEL_19]
+ | | outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
+ | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_103]
+ | | predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean)
+ | | Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_17]
+ | | alias:sr
+ | | Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+ | |<-Map 17 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_26]
+ | key expressions:_col0 (type: string)
+ | Map-reduce partition columns:_col0 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_22]
+ | outputColumnNames:["_col0"]
+ | Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ | Filter Operator [FIL_104]
+ | predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+ | Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ | TableScan [TS_20]
+ | alias:d1
+ | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ |<-Reducer 2 [SIMPLE_EDGE]
+ Reduce Output Operator [RS_56]
+ key expressions:_col1 (type: string), _col3 (type: string)
+ Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
+ sort order:++
+ Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+ value expressions:_col2 (type: string)
+ Merge Join Operator [MERGEJOIN_105]
+ | condition map:[{"":"Inner Join 0 to 1"}]
+ | keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
+ | outputColumnNames:["_col1","_col2","_col3"]
+ | Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+ |<-Map 1 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_51]
+ | key expressions:_col0 (type: string)
+ | Map-reduce partition columns:_col0 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+ | value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
+ | Select Operator [SEL_1]
+ | outputColumnNames:["_col0","_col1","_col2","_col3"]
+ | Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+ | Filter Operator [FIL_97]
+ | predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
+ | Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+ | TableScan [TS_0]
+ | alias:cs
+ | Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
+ |<-Map 6 [SIMPLE_EDGE]
+ Reduce Output Operator [RS_53]
+ key expressions:_col0 (type: string)
+ Map-reduce partition columns:_col0 (type: string)
+ sort order:+
+ Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Select Operator [SEL_4]
+ outputColumnNames:["_col0"]
+ Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator [FIL_98]
+ predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+ Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ TableScan [TS_2]
+ alias:d1
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
+PREHOOK: query: explain
+SELECT x.key, z.value, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN (select * from src1 union select * from src)z ON (x.value = z.value)
+union
+SELECT x.key, z.value, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN (select * from src1 union select * from src)z ON (x.value = z.value)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT x.key, z.value, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN (select * from src1 union select * from src)z ON (x.value = z.value)
+union
+SELECT x.key, z.value, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN (select * from src1 union select * from src)z ON (x.value = z.value)
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Map 1 <- Union 2 (CONTAINS)
+Map 11 <- Union 12 (CONTAINS)
+Map 16 <- Union 12 (CONTAINS)
+Map 8 <- Union 2 (CONTAINS)
+Reducer 13 <- Union 12 (SIMPLE_EDGE)
+Reducer 14 <- Map 17 (SIMPLE_EDGE), Reducer 13 (SIMPLE_EDGE)
+Reducer 15 <- Map 18 (SIMPLE_EDGE), Reducer 14 (SIMPLE_EDGE), Union 6 (CONTAINS)
+Reducer 3 <- Union 2 (SIMPLE_EDGE)
+Reducer 4 <- Map 9 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
+Reducer 5 <- Map 10 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE), Union 6 (CONTAINS)
+Reducer 7 <- Union 6 (SIMPLE_EDGE)
+
+Stage-0
+ Fetch Operator
+ limit:-1
+ Stage-1
+ Reducer 7
+ File Output Operator [FS_59]
+ compressed:false
+ Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
+ Group By Operator [GBY_57]
+ | keys:KEY._col0 (type: string), KEY._col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ |<-Union 6 [SIMPLE_EDGE]
+ |<-Reducer 15 [CONTAINS]
+ | Reduce Output Operator [RS_56]
+ | key expressions:_col0 (type: string), _col1 (type: string)
+ | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | sort order:++
+ | Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_55]
+ | keys:_col0 (type: string), _col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_51]
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ | Merge Join Operator [MERGEJOIN_85]
+ | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
+ | | outputColumnNames:["_col1","_col2"]
+ | | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ | |<-Map 18 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_49]
+ | | key expressions:_col0 (type: string)
+ | | Map-reduce partition columns:_col0 (type: string)
+ | | sort order:+
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | Select Operator [SEL_40]
+ | | outputColumnNames:["_col0"]
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_81]
+ | | predicate:key is not null (type: boolean)
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_39]
+ | | alias:y
+ | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ | |<-Reducer 14 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_47]
+ | key expressions:_col2 (type: string)
+ | Map-reduce partition columns:_col2 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
+ | value expressions:_col1 (type: string)
+ | Merge Join Operator [MERGEJOIN_84]
+ | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"}
+ | | outputColumnNames:["_col1","_col2"]
+ | | Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
+ | |<-Map 17 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_44]
+ | | key expressions:_col1 (type: string)
+ | | Map-reduce partition columns:_col1 (type: string)
+ | | sort order:+
+ | | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | | value expressions:_col0 (type: string)
+ | | Select Operator [SEL_38]
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_80]
+ | | predicate:(value is not null and key is not null) (type: boolean)
+ | | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_37]
+ | | alias:x
+ | | Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ | |<-Reducer 13 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_42]
+ | key expressions:_col1 (type: string)
+ | Map-reduce partition columns:_col1 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_36]
+ | outputColumnNames:["_col1"]
+ | Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_35]
+ | | keys:KEY._col0 (type: string), KEY._col1 (type: string)
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+ | |<-Union 12 [SIMPLE_EDGE]
+ | |<-Map 11 [CONTAINS]
+ | | Reduce Output Operator [RS_34]
+ | | key expressions:_col0 (type: string), _col1 (type: string)
+ | | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | | sort order:++
+ | | Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ | | Group By Operator [GBY_33]
+ | | keys:_col0 (type: string), _col1 (type: string)
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ | | Select Operator [SEL_27]
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_78]
+ | | predicate:value is not null (type: boolean)
+ | | Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_26]
+ | | alias:x
+ | | Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ | |<-Map 16 [CONTAINS]
+ | Reduce Output Operator [RS_34]
+ | key expressions:_col0 (type: string), _col1 (type: string)
+ | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | sort order:++
+ | Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_33]
+ | keys:_col0 (type: string), _col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_29]
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | Filter Operator [FIL_79]
+ | predicate:value is not null (type: boolean)
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | TableScan [TS_28]
+ | alias:y
+ | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ |<-Reducer 5 [CONTAINS]
+ Reduce Output Operator [RS_56]
+ key expressions:_col0 (type: string), _col1 (type: string)
+ Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ sort order:++
+ Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator [GBY_55]
+ keys:_col0 (type: string), _col1 (type: string)
+ outputColumnNames:["_col0","_col1"]
+ Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
+ Select Operator [SEL_25]
+ outputColumnNames:["_col0","_col1"]
+ Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Merge Join Operator [MERGEJOIN_83]
+ | condition map:[{"":"Inner Join 0 to 1"}]
+ | keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
+ | outputColumnNames:["_col1","_col2"]
+ | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ |<-Map 10 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_23]
+ | key expressions:_col0 (type: string)
+ | Map-reduce partition columns:_col0 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_14]
+ | outputColumnNames:["_col0"]
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | Filter Operator [FIL_77]
+ | predicate:key is not null (type: boolean)
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | TableScan [TS_13]
+ | alias:y
+ | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ |<-Reducer 4 [SIMPLE_EDGE]
+ Reduce Output Operator [RS_21]
+ key expressions:_col2 (type: string)
+ Map-reduce partition columns:_col2 (type: string)
+ sort order:+
+ Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
+ value expressions:_col1 (type: string)
+ Merge Join Operator [MERGEJOIN_82]
+ | condition map:[{"":"Inner Join 0 to 1"}]
+ | keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"}
+ | outputColumnNames:["_col1","_col2"]
+ | Statistics:Num rows: 144 Data size: 1509 Basic stats: COMPLETE Column stats: NONE
+ |<-Map 9 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_18]
+ | key expressions:_col1 (type: string)
+ | Map-reduce partition columns:_col1 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | value expressions:_col0 (type: string)
+ | Select Operator [SEL_12]
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | Filter Operator [FIL_76]
+ | predicate:(value is not null and key is not null) (type: boolean)
+ | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | TableScan [TS_11]
+ | alias:x
+ | Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ |<-Reducer 3 [SIMPLE_EDGE]
+ Reduce Output Operator [RS_16]
+ key expressions:_col1 (type: string)
+ Map-reduce partition columns:_col1 (type: string)
+ sort order:+
+ Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+ Select Operator [SEL_10]
+ outputColumnNames:["_col1"]
+ Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator [GBY_9]
+ | keys:KEY._col0 (type: string), KEY._col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+ |<-Union 2 [SIMPLE_EDGE]
+ |<-Map 1 [CONTAINS]
+ | Reduce Output Operator [RS_8]
+ | key expressions:_col0 (type: string), _col1 (type: string)
+ | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | sort order:++
+ | Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_7]
+ | keys:_col0 (type: string), _col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_1]
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ | Filter Operator [FIL_74]
+ | predicate:value is not null (type: boolean)
+ | Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ | TableScan [TS_0]
+ | alias:x
+ | Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ |<-Map 8 [CONTAINS]
+ Reduce Output Operator [RS_8]
+ key expressions:_col0 (type: string), _col1 (type: string)
+ Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ sort order:++
+ Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator [GBY_7]
+ keys:_col0 (type: string), _col1 (type: string)
+ outputColumnNames:["_col0","_col1"]
+ Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ Select Operator [SEL_3]
+ outputColumnNames:["_col0","_col1"]
+ Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator [FIL_75]
+ predicate:value is not null (type: boolean)
+ Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ TableScan [TS_2]
+ alias:y
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
+PREHOOK: query: explain
+SELECT x.key, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN (select * from src1 union select * from src)z ON (x.value = z.value)
+union
+SELECT x.key, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN (select key, value from src1 union select key, value from src union select key, value from src)z ON (x.value = z.value)
+union
+SELECT x.key, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN (select key, value from src1 union select key, value from src union select key, value from src union select key, value from src)z ON (x.value = z.value)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT x.key, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN (select * from src1 union select * from src)z ON (x.value = z.value)
+union
+SELECT x.key, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN (select key, value from src1 union select key, value from src union select key, value from src)z ON (x.value = z.value)
+union
+SELECT x.key, y.value
+FROM src1 x JOIN src y ON (x.key = y.key)
+JOIN (select key, value from src1 union select key, value from src union select key, value from src union select key, value from src)z ON (x.value = z.value)
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Map 1 <- Union 2 (CONTAINS)
+Map 10 <- Union 2 (CONTAINS)
+Map 13 <- Union 14 (CONTAINS)
+Map 20 <- Union 14 (CONTAINS)
+Map 21 <- Union 16 (CONTAINS)
+Map 24 <- Union 25 (CONTAINS)
+Map 33 <- Union 25 (CONTAINS)
+Map 34 <- Union 27 (CONTAINS)
+Map 35 <- Union 29 (CONTAINS)
+Reducer 15 <- Union 14 (SIMPLE_EDGE), Union 16 (CONTAINS)
+Reducer 17 <- Union 16 (SIMPLE_EDGE)
+Reducer 18 <- Map 22 (SIMPLE_EDGE), Reducer 17 (SIMPLE_EDGE)
+Reducer 19 <- Map 23 (SIMPLE_EDGE), Reducer 18 (SIMPLE_EDGE), Union 6 (CONTAINS)
+Reducer 26 <- Union 25 (SIMPLE_EDGE), Union 27 (CONTAINS)
+Reducer 28 <- Union 27 (SIMPLE_EDGE), Union 29 (CONTAINS)
+Reducer 3 <- Union 2 (SIMPLE_EDGE)
+Reducer 30 <- Union 29 (SIMPLE_EDGE)
+Reducer 31 <- Map 36 (SIMPLE_EDGE), Reducer 30 (SIMPLE_EDGE)
+Reducer 32 <- Map 37 (SIMPLE_EDGE), Reducer 31 (SIMPLE_EDGE), Union 8 (CONTAINS)
+Reducer 4 <- Map 11 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
+Reducer 5 <- Map 12 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE), Union 6 (CONTAINS)
+Reducer 7 <- Union 6 (SIMPLE_EDGE), Union 8 (CONTAINS)
+Reducer 9 <- Union 8 (SIMPLE_EDGE)
+
+Stage-0
+ Fetch Operator
+ limit:-1
+ Stage-1
+ Reducer 9
+ File Output Operator [FS_119]
+ compressed:false
+ Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
+ Group By Operator [GBY_117]
+ | keys:KEY._col0 (type: string), KEY._col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ |<-Union 8 [SIMPLE_EDGE]
+ |<-Reducer 32 [CONTAINS]
+ | Reduce Output Operator [RS_116]
+ | key expressions:_col0 (type: string), _col1 (type: string)
+ | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | sort order:++
+ | Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_115]
+ | keys:_col0 (type: string), _col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_111]
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ | Merge Join Operator [MERGEJOIN_167]
+ | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
+ | | outputColumnNames:["_col2","_col5"]
+ | | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ | |<-Map 37 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_109]
+ | | key expressions:_col0 (type: string)
+ | | Map-reduce partition columns:_col0 (type: string)
+ | | sort order:+
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | value expressions:_col1 (type: string)
+ | | Select Operator [SEL_100]
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_161]
+ | | predicate:key is not null (type: boolean)
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_99]
+ | | alias:y
+ | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ | |<-Reducer 31 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_107]
+ | key expressions:_col2 (type: string)
+ | Map-reduce partition columns:_col2 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
+ | Merge Join Operator [MERGEJOIN_166]
+ | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"}
+ | | outputColumnNames:["_col2"]
+ | | Statistics:Num rows: 242 Data size: 2565 Basic stats: COMPLETE Column stats: NONE
+ | |<-Map 36 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_104]
+ | | key expressions:_col1 (type: string)
+ | | Map-reduce partition columns:_col1 (type: string)
+ | | sort order:+
+ | | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | | value expressions:_col0 (type: string)
+ | | Select Operator [SEL_98]
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_160]
+ | | predicate:(value is not null and key is not null) (type: boolean)
+ | | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_97]
+ | | alias:x
+ | | Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ | |<-Reducer 30 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_102]
+ | key expressions:_col1 (type: string)
+ | Map-reduce partition columns:_col1 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 220 Data size: 2332 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_96]
+ | outputColumnNames:["_col1"]
+ | Statistics:Num rows: 220 Data size: 2332 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_95]
+ | | keys:KEY._col0 (type: string), KEY._col1 (type: string)
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 220 Data size: 2332 Basic stats: COMPLETE Column stats: NONE
+ | |<-Union 29 [SIMPLE_EDGE]
+ | |<-Map 35 [CONTAINS]
+ | | Reduce Output Operator [RS_94]
+ | | key expressions:_col0 (type: string), _col1 (type: string)
+ | | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | | sort order:++
+ | | Statistics:Num rows: 440 Data size: 4664 Basic stats: COMPLETE Column stats: NONE
+ | | Group By Operator [GBY_93]
+ | | keys:_col0 (type: string), _col1 (type: string)
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 440 Data size: 4664 Basic stats: COMPLETE Column stats: NONE
+ | | Select Operator [SEL_89]
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_159]
+ | | predicate:value is not null (type: boolean)
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_88]
+ | | alias:y
+ | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ | |<-Reducer 28 [CONTAINS]
+ | Reduce Output Operator [RS_94]
+ | key expressions:_col0 (type: string), _col1 (type: string)
+ | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | sort order:++
+ | Statistics:Num rows: 440 Data size: 4664 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_93]
+ | keys:_col0 (type: string), _col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 440 Data size: 4664 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_86]
+ | | keys:KEY._col0 (type: string), KEY._col1 (type: string)
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 190 Data size: 2008 Basic stats: COMPLETE Column stats: NONE
+ | |<-Union 27 [SIMPLE_EDGE]
+ | |<-Map 34 [CONTAINS]
+ | | Reduce Output Operator [RS_85]
+ | | key expressions:_col0 (type: string), _col1 (type: string)
+ | | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | | sort order:++
+ | | Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
+ | | Group By Operator [GBY_84]
+ | | keys:_col0 (type: string), _col1 (type: string)
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
+ | | Select Operator [SEL_80]
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_158]
+ | | predicate:value is not null (type: boolean)
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_79]
+ | | alias:y
+ | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ | |<-Reducer 26 [CONTAINS]
+ | Reduce Output Operator [RS_85]
+ | key expressions:_col0 (type: string), _col1 (type: string)
+ | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | sort order:++
+ | Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_84]
+ | keys:_col0 (type: string), _col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 381 Data size: 4028 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_77]
+ | | keys:KEY._col0 (type: string), KEY._col1 (type: string)
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 131 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
+ | |<-Union 25 [SIMPLE_EDGE]
+ | |<-Map 24 [CONTAINS]
+ | | Reduce Output Operator [RS_76]
+ | | key expressions:_col0 (type: string), _col1 (type: string)
+ | | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | | sort order:++
+ | | Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ | | Group By Operator [GBY_75]
+ | | keys:_col0 (type: string), _col1 (type: string)
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ | | Select Operator [SEL_69]
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_156]
+ | | predicate:value is not null (type: boolean)
+ | | Statistics:Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_68]
+ | | alias:x
+ | | Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ | |<-Map 33 [CONTAINS]
+ | Reduce Output Operator [RS_76]
+ | key expressions:_col0 (type: string), _col1 (type: string)
+ | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | sort order:++
+ | Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_75]
+ | keys:_col0 (type: string), _col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_71]
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | Filter Operator [FIL_157]
+ | predicate:value is not null (type: boolean)
+ | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | TableScan [TS_70]
+ | alias:y
+ | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ |<-Reducer 7 [CONTAINS]
+ Reduce Output Operator [RS_116]
+ key expressions:_col0 (type: string), _col1 (type: string)
+ Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ sort order:++
+ Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator [GBY_115]
+ keys:_col0 (type: string), _col1 (type: string)
+ outputColumnNames:["_col0","_col1"]
+ Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator [GBY_66]
+ | keys:KEY._col0 (type: string), KEY._col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ |<-Union 6 [SIMPLE_EDGE]
+ |<-Reducer 19 [CONTAINS]
+ | Reduce Output Operator [RS_65]
+ | key expressions:_col0 (type: string), _col1 (type: string)
+ | Map-reduce partition columns:_col0 (type: string), _col1 (type: string)
+ | sort order:++
+ | Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
+ | Group By Operator [GBY_64]
+ | keys:_col0 (type: string), _col1 (type: string)
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 550 Data size: 5842 Basic stats: COMPLETE Column stats: NONE
+ | Select Operator [SEL_60]
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ | Merge Join Operator [MERGEJOIN_165]
+ | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
+ | | outputColumnNames:["_col2","_col5"]
+ | | Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ | |<-Map 23 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_58]
+ | | key expressions:_col0 (type: string)
+ | | Map-reduce partition columns:_col0 (type: string)
+ | | sort order:+
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | value expressions:_col1 (type: string)
+ | | Select Operator [SEL_49]
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_155]
+ | | predicate:key is not null (type: boolean)
+ | | Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_48]
+ | | alias:y
+ | | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ | |<-Reducer 18 [SIMPLE_EDGE]
+ | Reduce Output Operator [RS_56]
+ | key expressions:_col2 (type: string)
+ | Map-reduce partition columns:_col2 (type: string)
+ | sort order:+
+ | Statistics:Num rows: 209 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
+ | Merge Join Operator [MERGEJOIN_164]
+ | | condition map:[{"":"Inner Join 0 to 1"}]
+ | | keys:{"0":"_col1 (type: string)","1":"_col1 (type: string)"}
+ | | outputColumnNames:["_col2"]
+ | | Statistics:Num rows: 209 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
+ | |<-Map 22 [SIMPLE_EDGE]
+ | | Reduce Output Operator [RS_53]
+ | | key expressions:_col1 (type: string)
+ | | Map-reduce partition columns:_col1 (type: string)
+ | | sort order:+
+ | | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | | value expressions:_col0 (type: string)
+ | | Select Operator [SEL_47]
+ | | outputColumnNames:["_col0","_col1"]
+ | | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | | Filter Operator [FIL_154]
+ | | predicate:(value is not null and key is not null) (type: boolean)
+ | | Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+ | | TableScan [TS_46]
+ |
<TRUNCATED>
[34/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_views.q.out b/ql/src/test/results/clientpositive/llap/cbo_views.q.out
new file mode 100644
index 0000000..4a7b935
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_views.q.out
@@ -0,0 +1,237 @@
+PREHOOK: query: -- 10. Test views
+create view v1 as select c_int, value, c_boolean, dt from cbo_t1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: -- 10. Test views
+create view v1 as select c_int, value, c_boolean, dt from cbo_t1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select c_int, value from cbo_t2
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select c_int, value from cbo_t2
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: select value from v1 where c_boolean=false
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select value from v1 where c_boolean=false
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+1
+1
+PREHOOK: query: select max(c_int) from v1 group by (c_boolean)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select max(c_int) from v1 group by (c_boolean)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+NULL
+1
+1
+PREHOOK: query: select count(v1.c_int) from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(v1.c_int) from v1 join cbo_t2 on v1.c_int = cbo_t2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+234
+PREHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v2
+#### A masked pattern was here ####
+234
+PREHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+156
+PREHOOK: query: create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v3
+POSTHOOK: query: create view v3 as select v1.value val from v1 join cbo_t1 on v1.c_boolean = cbo_t1.c_boolean
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v3
+PREHOOK: query: select count(val) from v3 where val != '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v3
+#### A masked pattern was here ####
+POSTHOOK: query: select count(val) from v3 where val != '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v3
+#### A masked pattern was here ####
+96
+PREHOOK: query: with q1 as ( select key from cbo_t1 where key = '1')
+select count(*) from q1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select key from cbo_t1 where key = '1')
+select count(*) from q1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+12
+PREHOOK: query: with q1 as ( select value from v1 where c_boolean = false)
+select count(value) from q1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select value from v1 where c_boolean = false)
+select count(value) from q1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+2
+PREHOOK: query: create view v4 as
+with q1 as ( select key,c_int from cbo_t1 where key = '1')
+select * from q1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v4
+POSTHOOK: query: create view v4 as
+with q1 as ( select key,c_int from cbo_t1 where key = '1')
+select * from q1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v4
+PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false),
+q2 as ( select c_int,c_boolean from v1 where value = '1')
+select sum(c_int) from (select c_int from q1) a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false),
+q2 as ( select c_int,c_boolean from v1 where value = '1')
+select sum(c_int) from (select c_int from q1) a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+#### A masked pattern was here ####
+2
+PREHOOK: query: with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int and cbo_t1.dt='2014'),
+q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14')
+select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v4
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as ( select cbo_t1.c_int c_int from q2 join cbo_t1 where q2.c_int = cbo_t1.c_int and cbo_t1.dt='2014'),
+q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14')
+select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v4
+#### A masked pattern was here ####
+31104
+PREHOOK: query: drop view v1
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v1
+PREHOOK: Output: default@v1
+POSTHOOK: query: drop view v1
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: default@v1
+PREHOOK: query: drop view v2
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v2
+PREHOOK: Output: default@v2
+POSTHOOK: query: drop view v2
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v2
+POSTHOOK: Output: default@v2
+PREHOOK: query: drop view v3
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v3
+PREHOOK: Output: default@v3
+POSTHOOK: query: drop view v3
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v3
+POSTHOOK: Output: default@v3
+PREHOOK: query: drop view v4
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v4
+PREHOOK: Output: default@v4
+POSTHOOK: query: drop view v4
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v4
+POSTHOOK: Output: default@v4
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_windowing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_windowing.q.out b/ql/src/test/results/clientpositive/llap/cbo_windowing.q.out
new file mode 100644
index 0000000..52b584a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_windowing.q.out
@@ -0,0 +1,293 @@
+PREHOOK: query: -- 9. Test Windowing Functions
+-- SORT_QUERY_RESULTS
+
+select count(c_int) over() from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 9. Test Windowing Functions
+-- SORT_QUERY_RESULTS
+
+select count(c_int) over() from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+PREHOOK: query: select count(c_int) over(partition by c_float order by key), sum(c_float) over(partition by c_float order by key), max(c_int) over(partition by c_float order by key), min(c_int) over(partition by c_float order by key), row_number() over(partition by c_float order by key) as rn, rank() over(partition by c_float order by key), dense_rank() over(partition by c_float order by key), round(percent_rank() over(partition by c_float order by key), 2), lead(c_int, 2, c_int) over(partition by c_float order by key), lag(c_float, 2, c_float) over(partition by c_float order by key) from cbo_t1 order by rn
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select count(c_int) over(partition by c_float order by key), sum(c_float) over(partition by c_float order by key), max(c_int) over(partition by c_float order by key), min(c_int) over(partition by c_float order by key), row_number() over(partition by c_float order by key) as rn, rank() over(partition by c_float order by key), dense_rank() over(partition by c_float order by key), round(percent_rank() over(partition by c_float order by key), 2), lead(c_int, 2, c_int) over(partition by c_float order by key), lag(c_float, 2, c_float) over(partition by c_float order by key) from cbo_t1 order by rn
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0 NULL NULL NULL 1 1 1 0.0 NULL NULL
+0 NULL NULL NULL 2 1 1 0.0 NULL NULL
+16 16.0 1 1 10 5 3 0.24 1 1.0
+16 16.0 1 1 11 5 3 0.24 1 1.0
+16 16.0 1 1 12 5 3 0.24 1 1.0
+16 16.0 1 1 13 5 3 0.24 1 1.0
+16 16.0 1 1 14 5 3 0.24 1 1.0
+16 16.0 1 1 15 5 3 0.24 1 1.0
+16 16.0 1 1 16 5 3 0.24 1 1.0
+16 16.0 1 1 5 5 3 0.24 1 1.0
+16 16.0 1 1 6 5 3 0.24 1 1.0
+16 16.0 1 1 7 5 3 0.24 1 1.0
+16 16.0 1 1 8 5 3 0.24 1 1.0
+16 16.0 1 1 9 5 3 0.24 1 1.0
+18 18.0 1 1 17 17 4 0.94 1 1.0
+18 18.0 1 1 18 17 4 0.94 1 1.0
+2 2.0 1 1 1 1 1 0.0 1 1.0
+2 2.0 1 1 2 1 1 0.0 1 1.0
+4 4.0 1 1 3 3 2 0.12 1 1.0
+4 4.0 1 1 4 3 2 0.12 1 1.0
+PREHOOK: query: select * from (select count(c_int) over(partition by c_float order by key), sum(c_float) over(partition by c_float order by key), max(c_int) over(partition by c_float order by key), min(c_int) over(partition by c_float order by key), row_number() over(partition by c_float order by key) as rn, rank() over(partition by c_float order by key), dense_rank() over(partition by c_float order by key), round(percent_rank() over(partition by c_float order by key),2), lead(c_int, 2, c_int) over(partition by c_float order by key ), lag(c_float, 2, c_float) over(partition by c_float order by key) from cbo_t1 order by rn) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(c_int) over(partition by c_float order by key), sum(c_float) over(partition by c_float order by key), max(c_int) over(partition by c_float order by key), min(c_int) over(partition by c_float order by key), row_number() over(partition by c_float order by key) as rn, rank() over(partition by c_float order by key), dense_rank() over(partition by c_float order by key), round(percent_rank() over(partition by c_float order by key),2), lead(c_int, 2, c_int) over(partition by c_float order by key ), lag(c_float, 2, c_float) over(partition by c_float order by key) from cbo_t1 order by rn) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+0 NULL NULL NULL 1 1 1 0.0 NULL NULL
+0 NULL NULL NULL 2 1 1 0.0 NULL NULL
+16 16.0 1 1 10 5 3 0.24 1 1.0
+16 16.0 1 1 11 5 3 0.24 1 1.0
+16 16.0 1 1 12 5 3 0.24 1 1.0
+16 16.0 1 1 13 5 3 0.24 1 1.0
+16 16.0 1 1 14 5 3 0.24 1 1.0
+16 16.0 1 1 15 5 3 0.24 1 1.0
+16 16.0 1 1 16 5 3 0.24 1 1.0
+16 16.0 1 1 5 5 3 0.24 1 1.0
+16 16.0 1 1 6 5 3 0.24 1 1.0
+16 16.0 1 1 7 5 3 0.24 1 1.0
+16 16.0 1 1 8 5 3 0.24 1 1.0
+16 16.0 1 1 9 5 3 0.24 1 1.0
+18 18.0 1 1 17 17 4 0.94 1 1.0
+18 18.0 1 1 18 17 4 0.94 1 1.0
+2 2.0 1 1 1 1 1 0.0 1 1.0
+2 2.0 1 1 2 1 1 0.0 1 1.0
+4 4.0 1 1 3 3 2 0.12 1 1.0
+4 4.0 1 1 4 3 2 0.12 1 1.0
+PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+PREHOOK: query: select 1+sum(c_int) over() from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select 1+sum(c_int) over() from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+19
+PREHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+36
+PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 2 1.0 10.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 11.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 12.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 3.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 4.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 5.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 6.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 7.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 8.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 9.0 1.0 2.0 1.0 1.0
+NULL NULL 0 NULL NULL NULL NULL NULL NULL
+NULL NULL 0 NULL NULL NULL NULL NULL NULL
+PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+NULL NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL
+NULL NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL
+PREHOOK: query: select *, rank() over(partition by key order by value) as rr from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: select *, rank() over(partition by key order by value) as rr from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+ 1
+ 1
+ 1
+ 1
+ val_165 5
+ val_193 6
+ val_265 7
+ val_27 8
+ val_409 9
+ val_484 10
+128 1
+146 val_146 1
+150 val_150 1
+213 val_213 1
+224 1
+238 val_238 1
+255 val_255 1
+273 val_273 1
+278 val_278 1
+311 val_311 1
+369 1
+401 val_401 1
+406 val_406 1
+66 val_66 1
+98 val_98 1
+PREHOOK: query: select *, rank() over(partition by key order by value) from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: select *, rank() over(partition by key order by value) from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+ 1
+ 1
+ 1
+ 1
+ val_165 5
+ val_193 6
+ val_265 7
+ val_27 8
+ val_409 9
+ val_484 10
+128 1
+146 val_146 1
+150 val_150 1
+213 val_213 1
+224 1
+238 val_238 1
+255 val_255 1
+273 val_273 1
+278 val_278 1
+311 val_311 1
+369 1
+401 val_401 1
+406 val_406 1
+66 val_66 1
+98 val_98 1
[06/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
new file mode 100644
index 0000000..5d7a985
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge_incompat1.q.out
@@ -0,0 +1,245 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5b
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 5 files total
+analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: -- 5 files total
+analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 6 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+PREHOOK: query: alter table orc_merge5b concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: alter table orc_merge5b concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+PREHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind
+analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind
+analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 4 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
+5 eat 0.8 6 1969-12-31 16:00:20
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
new file mode 100644
index 0000000..83543d5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge_incompat2.q.out
@@ -0,0 +1,375 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc merge file tests for dynamic partition case
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc merge file tests for dynamic partition case
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5a
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ st
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 4 items
+#### A masked pattern was here ####
+Found 4 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+PREHOOK: query: explain alter table orc_merge5a partition(st=80.0) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: query: explain alter table orc_merge5a partition(st=80.0) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+ Stage-1 depends on stages: Stage-0
+ Stage-2 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-0
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ partition:
+ st 80.0
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 3 items
+#### A masked pattern was here ####
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out b/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
new file mode 100644
index 0000000..2d0984b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out
@@ -0,0 +1,701 @@
+PREHOOK: query: CREATE TABLE staging(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@staging
+POSTHOOK: query: CREATE TABLE staging(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@staging
+PREHOOK: query: CREATE TABLE orc_ppd_staging(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ c char(50),
+ v varchar(50),
+ da date,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: CREATE TABLE orc_ppd_staging(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ c char(50),
+ v varchar(50),
+ da date,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_ppd_staging
+PREHOOK: query: insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from staging order by t, s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from staging order by t, s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@orc_ppd_staging
+POSTHOOK: Lineage: orc_ppd_staging.b SIMPLE [(staging)staging.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.bin SIMPLE [(staging)staging.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.bo SIMPLE [(staging)staging.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.c EXPRESSION [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.d SIMPLE [(staging)staging.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.da EXPRESSION [(staging)staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.dec SIMPLE [(staging)staging.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.f SIMPLE [(staging)staging.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.i SIMPLE [(staging)staging.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.s SIMPLE [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.si SIMPLE [(staging)staging.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.t SIMPLE [(staging)staging.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.ts SIMPLE [(staging)staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.v EXPRESSION [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+PREHOOK: query: -- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
+-- which makes it hard to test bloom filters
+insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11","1990-03-11 10:11:58.703308",-71.54,"aaa" from staging limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: -- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
+-- which makes it hard to test bloom filters
+insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11","1990-03-11 10:11:58.703308",-71.54,"aaa" from staging limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@orc_ppd_staging
+POSTHOOK: Lineage: orc_ppd_staging.b EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.bin EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.bo SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.c EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.d EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.da EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.dec EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.f EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.i EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.s SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.si EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.t EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.ts EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.v EXPRESSION []
+PREHOOK: query: insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11","2023-03-11 10:11:58.703308",71.54,"zzz" from staging limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11","2023-03-11 10:11:58.703308",71.54,"zzz" from staging limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@orc_ppd_staging
+POSTHOOK: Lineage: orc_ppd_staging.b SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.bin EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.bo SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.c EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.d SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.da EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.dec EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.f EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.i SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.s SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.si EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.t EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.ts EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.v EXPRESSION []
+PREHOOK: query: CREATE TABLE orc_ppd(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ c char(50),
+ v varchar(50),
+ da date,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_ppd
+POSTHOOK: query: CREATE TABLE orc_ppd(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ c char(50),
+ v varchar(50),
+ da date,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_ppd
+PREHOOK: query: insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from orc_ppd_staging order by t, s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd_staging
+PREHOOK: Output: default@orc_ppd
+POSTHOOK: query: insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from orc_ppd_staging order by t, s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_ppd_staging
+POSTHOOK: Output: default@orc_ppd
+POSTHOOK: Lineage: orc_ppd.b SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.bin SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.bo SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.c EXPRESSION [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.d SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.da EXPRESSION [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.dec SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: orc_ppd.f SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.i SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.s SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.si SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.t SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.ts SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.v EXPRESSION [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:s, type:string, comment:null), ]
+PREHOOK: query: -- Row group statistics for column t:
+-- Entry 0: count: 994 hasNull: true min: -10 max: 54 sum: 26014 positions: 0,0,0,0,0,0,0
+-- Entry 1: count: 1000 hasNull: false min: 54 max: 118 sum: 86812 positions: 0,2,124,0,0,116,11
+-- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 0,4,119,0,0,244,19
+
+-- INPUT_RECORDS: 2100 (all row groups)
+select count(*) from orc_ppd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2100
+PREHOOK: query: -- INPUT_RECORDS: 0 (no row groups)
+select count(*) from orc_ppd where t > 127
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ RECORDS_OUT_0: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = 55
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+8
+PREHOOK: query: select count(*) from orc_ppd where t <=> 50
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+22
+PREHOOK: query: select count(*) from orc_ppd where t <=> 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+16
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t = "54"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+18
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = -10.0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = cast(53 as float)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+32
+PREHOOK: query: select count(*) from orc_ppd where t = cast(53 as double)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+32
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1697
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t < 100 and t > 98
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+12
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1713
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t is null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: -- INPUT_RECORDS: 1100 (2 row groups)
+select count(*) from orc_ppd where t in (5, 120)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+50
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t between 60 and 80
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+318
+PREHOOK: query: -- bloom filter tests
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where t = -100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ RECORDS_OUT_0: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where t <=> -100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ RECORDS_OUT_0: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where t = 125
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where t IN (-100, 125, 200)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: -- Row group statistics for column s:
+-- Entry 0: count: 1000 hasNull: false min: max: zach young sum: 12907 positions: 0,0,0
+-- Entry 1: count: 1000 hasNull: false min: alice allen max: zach zipper sum: 12704 positions: 0,1611,191
+-- Entry 2: count: 100 hasNull: false min: bob davidson max: zzz sum: 1281 positions: 0,3246,373
+
+-- INPUT_RECORDS: 0 (no row groups)
+select count(*) from orc_ppd where s > "zzz"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ RECORDS_OUT_0: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where s = "zach young"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: select count(*) from orc_ppd where s <=> "zach zipper"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: select count(*) from orc_ppd where s <=> ""
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: -- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s is null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ RECORDS_OUT_0: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 2100
+select count(*) from orc_ppd where s is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2100
+PREHOOK: query: -- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s = cast("zach young" as char(50))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where s = cast("zach young" as char(10))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as varchar(10))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as varchar(50))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s < "b"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+81
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s > "alice" and s < "bob"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+74
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s in ("alice allen", "")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+12
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s between "" and "alice allen"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+13
+PREHOOK: query: -- INPUT_RECORDS: 100 (1 row group)
+select count(*) from orc_ppd where s between "zz" and "zzz"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1
+PREHOOK: query: -- INPUT_RECORDS: 1100 (2 row groups)
+select count(*) from orc_ppd where s between "zach zipper" and "zzz"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+7
+PREHOOK: query: -- bloom filter tests
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s = "hello world"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where s <=> "apache hive"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where s IN ("a", "z")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 100
+select count(*) from orc_ppd where s = "sarah ovid"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: -- INPUT_RECORDS: 1100
+select count(*) from orc_ppd where s = "wendy king"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: -- INPUT_RECORDS: 1000
+select count(*) from orc_ppd where s = "wendy king" and t < 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: -- INPUT_RECORDS: 100
+select count(*) from orc_ppd where s = "wendy king" and t > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_vectorization_ppd.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_vectorization_ppd.q.out b/ql/src/test/results/clientpositive/llap/orc_vectorization_ppd.q.out
new file mode 100644
index 0000000..738abc4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_vectorization_ppd.q.out
@@ -0,0 +1,288 @@
+PREHOOK: query: -- create table with 1000 rows
+create table srcorc(key string, value string) stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcorc
+POSTHOOK: query: -- create table with 1000 rows
+create table srcorc(key string, value string) stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcorc
+PREHOOK: query: insert overwrite table srcorc select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@srcorc
+POSTHOOK: query: insert overwrite table srcorc select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@srcorc
+POSTHOOK: Lineage: srcorc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcorc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table srcorc select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@srcorc
+POSTHOOK: query: insert into table srcorc select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@srcorc
+POSTHOOK: Lineage: srcorc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcorc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- load table with each row group having 1000 rows and stripe 1 & 2 having 5000 & 2000 rows respectively
+create table if not exists vectororc
+(s1 string,
+s2 string,
+d double,
+s3 string)
+stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: -- load table with each row group having 1000 rows and stripe 1 & 2 having 5000 & 2000 rows respectively
+create table if not exists vectororc
+(s1 string,
+s2 string,
+d double,
+s3 string)
+stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectororc
+PREHOOK: query: -- insert creates separate orc files
+insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: -- insert creates separate orc files
+insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 SIMPLE []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select null, "b", rand(2), "zoo" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select null, "b", rand(2), "zoo" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 EXPRESSION []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select null, "c", rand(3), "zoo" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select null, "c", rand(3), "zoo" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 EXPRESSION []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select "apple", "d", rand(4), "zoo" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select "apple", "d", rand(4), "zoo" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 SIMPLE []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select null, "e", rand(5), "z" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select null, "e", rand(5), "z" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 EXPRESSION []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select "apple", "f", rand(6), "z" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select "apple", "f", rand(6), "z" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 SIMPLE []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: insert into table vectororc select null, "g", rand(7), "zoo" from srcorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: insert into table vectororc select null, "g", rand(7), "zoo" from srcorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@vectororc
+POSTHOOK: Lineage: vectororc.d EXPRESSION []
+POSTHOOK: Lineage: vectororc.s1 EXPRESSION []
+POSTHOOK: Lineage: vectororc.s2 SIMPLE []
+POSTHOOK: Lineage: vectororc.s3 SIMPLE []
+PREHOOK: query: -- since vectororc table has multiple orc file we will load them into a single file using another table
+create table if not exists testorc
+(s1 string,
+s2 string,
+d double,
+s3 string)
+stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testorc
+POSTHOOK: query: -- since vectororc table has multiple orc file we will load them into a single file using another table
+create table if not exists testorc
+(s1 string,
+s2 string,
+d double,
+s3 string)
+stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testorc
+PREHOOK: query: insert overwrite table testorc select * from vectororc order by s2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vectororc
+PREHOOK: Output: default@testorc
+POSTHOOK: query: insert overwrite table testorc select * from vectororc order by s2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vectororc
+POSTHOOK: Output: default@testorc
+POSTHOOK: Lineage: testorc.d SIMPLE [(vectororc)vectororc.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: testorc.s1 SIMPLE [(vectororc)vectororc.FieldSchema(name:s1, type:string, comment:null), ]
+POSTHOOK: Lineage: testorc.s2 SIMPLE [(vectororc)vectororc.FieldSchema(name:s2, type:string, comment:null), ]
+POSTHOOK: Lineage: testorc.s3 SIMPLE [(vectororc)vectororc.FieldSchema(name:s3, type:string, comment:null), ]
+PREHOOK: query: -- row group (1,4) from stripe 1 and row group (1) from stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s1 is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- row group (1,4) from stripe 1 and row group (1) from stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s1 is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+3000 1505
+PREHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s1 is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s1 is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+3000 1505
+PREHOOK: query: -- row group (2,3,5) from stripe 1 and row group (2) from stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- row group (2,3,5) from stripe 1 and row group (2) from stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+4000 2006
+PREHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+4000 2006
+PREHOOK: query: -- last row group of stripe 1 and first row group of stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s3="z"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- last row group of stripe 1 and first row group of stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s3="z"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+2000 1011
+PREHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s3="z"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s3="z"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+2000 1011
+PREHOOK: query: -- first row group of stripe 1 and last row group of stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s2="a" or s2="g"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- first row group of stripe 1 and last row group of stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s2="a" or s2="g"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+2000 1006
+PREHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s2="a" or s2="g"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testorc
+#### A masked pattern was here ####
+POSTHOOK: query: -- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s2="a" or s2="g"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testorc
+#### A masked pattern was here ####
+2000 1006
+PREHOOK: query: drop table srcorc
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@srcorc
+PREHOOK: Output: default@srcorc
+POSTHOOK: query: drop table srcorc
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@srcorc
+POSTHOOK: Output: default@srcorc
+PREHOOK: query: drop table vectororc
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@vectororc
+PREHOOK: Output: default@vectororc
+POSTHOOK: query: drop table vectororc
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@vectororc
+POSTHOOK: Output: default@vectororc
+PREHOOK: query: drop table testorc
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@testorc
+PREHOOK: Output: default@testorc
+POSTHOOK: query: drop table testorc
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@testorc
+POSTHOOK: Output: default@testorc
[18/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out b/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
new file mode 100644
index 0000000..9119780
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/limit_pushdown.q.out
@@ -0,0 +1,1487 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- HIVE-3562 Some limit can be pushed down to map stage
+
+explain
+select key,value from src order by key limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- HIVE-3562 Some limit can be pushed down to map stage
+
+explain
+select key,value from src order by key limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 20
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 20
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select key,value from src order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value from src order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+118 val_118
+119 val_119
+PREHOOK: query: explain
+select key,value from src order by key desc limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key,value from src order by key desc limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: -
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 20
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 20
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select key,value from src order by key desc limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value from src order by key desc limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+82 val_82
+83 val_83
+83 val_83
+84 val_84
+84 val_84
+85 val_85
+86 val_86
+87 val_87
+9 val_9
+90 val_90
+90 val_90
+90 val_90
+92 val_92
+95 val_95
+95 val_95
+96 val_96
+97 val_97
+97 val_97
+98 val_98
+98 val_98
+PREHOOK: query: explain
+select value, sum(key + 1) as sum from src group by value order by value limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select value, sum(key + 1) as sum from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ value expressions: _col1 (type: double)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 20
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 20
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select value, sum(key + 1) as sum from src group by value order by value limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value, sum(key + 1) as sum from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_0 3.0
+val_10 11.0
+val_100 202.0
+val_103 208.0
+val_104 210.0
+val_105 106.0
+val_11 12.0
+val_111 112.0
+val_113 228.0
+val_114 115.0
+val_116 117.0
+val_118 238.0
+val_119 360.0
+val_12 26.0
+val_120 242.0
+val_125 252.0
+val_126 127.0
+val_128 387.0
+val_129 260.0
+val_131 132.0
+PREHOOK: query: -- deduped RS
+explain
+select value,avg(key + 1) from src group by value order by value limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- deduped RS
+explain
+select value,avg(key + 1) from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: value (type: string), (UDFToDouble(key) + 1.0) (type: double)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: avg(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ value expressions: _col1 (type: struct<count:bigint,sum:double,input:double>)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: avg(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 20
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 20
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select value,avg(key + 1) from src group by value order by value limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value,avg(key + 1) from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_0 1.0
+val_10 11.0
+val_100 101.0
+val_103 104.0
+val_104 105.0
+val_105 106.0
+val_11 12.0
+val_111 112.0
+val_113 114.0
+val_114 115.0
+val_116 117.0
+val_118 119.0
+val_119 120.0
+val_12 13.0
+val_120 121.0
+val_125 126.0
+val_126 127.0
+val_128 129.0
+val_129 130.0
+val_131 132.0
+PREHOOK: query: -- distincts
+explain
+select distinct(cdouble) as dis from alltypesorc order by dis limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- distincts
+explain
+select distinct(cdouble) as dis from alltypesorc order by dis limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: alltypesorc
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cdouble (type: double)
+ outputColumnNames: _col0
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: double)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: double)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: double)
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: double)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 20
+ Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 20
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select distinct(cdouble) as dis from alltypesorc order by dis limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select distinct(cdouble) as dis from alltypesorc order by dis limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-16269.0
+-16274.0
+-16277.0
+-16280.0
+-16296.0
+-16300.0
+-16305.0
+-16306.0
+-16307.0
+-16309.0
+-16310.0
+-16311.0
+-16324.0
+-16339.0
+-16355.0
+-16369.0
+-16372.0
+-16373.0
+-16379.0
+NULL
+PREHOOK: query: explain
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: alltypesorc
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), cdouble (type: double)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: tinyint), _col1 (type: double)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: tinyint), _col1 (type: double)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: tinyint)
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: tinyint)
+ mode: complete
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 20
+ Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 20
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-46 24
+-47 22
+-48 29
+-49 26
+-50 30
+-51 21
+-52 33
+-53 22
+-54 26
+-55 29
+-56 36
+-57 35
+-58 23
+-59 31
+-60 27
+-61 25
+-62 27
+-63 19
+-64 24
+NULL 2932
+PREHOOK: query: explain
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: alltypesorc
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), cdouble (type: double)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: tinyint), _col1 (type: double)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: tinyint), _col1 (type: double)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: tinyint)
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: tinyint)
+ mode: complete
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 20
+ Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 20
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(cdouble) from (select ctinyint, cdouble from alltypesorc group by ctinyint, cdouble) t1 group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-46 24
+-47 22
+-48 29
+-49 26
+-50 30
+-51 21
+-52 33
+-53 22
+-54 26
+-55 29
+-56 36
+-57 35
+-58 23
+-59 31
+-60 27
+-61 25
+-62 27
+-63 19
+-64 24
+NULL 2932
+PREHOOK: query: -- multi distinct
+explain
+select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- multi distinct
+explain
+select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: alltypesorc
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(DISTINCT _col1), count(DISTINCT _col2)
+ keys: _col0 (type: tinyint), _col1 (type: string), _col2 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: tinyint), _col1 (type: string), _col2 (type: string)
+ sort order: +++
+ Map-reduce partition columns: _col0 (type: tinyint)
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0)
+ keys: KEY._col0 (type: tinyint)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 20
+ Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 20
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-46 3 19
+-47 3 23
+-48 3 27
+-49 3 24
+-50 3 25
+-51 1012 1045
+-52 3 21
+-53 3 17
+-54 3 21
+-55 3 21
+-56 3 22
+-57 3 23
+-58 3 24
+-59 3 27
+-60 3 25
+-61 3 25
+-62 3 23
+-63 3 16
+-64 3 13
+NULL 3065 3
+PREHOOK: query: -- limit zero
+explain
+select key,value from src order by key limit 0
+PREHOOK: type: QUERY
+POSTHOOK: query: -- limit zero
+explain
+select key,value from src order by key limit 0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 0
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select key,value from src order by key limit 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value from src order by key limit 0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+PREHOOK: query: -- 2MR (applied to last RS)
+explain
+select value, sum(key) as sum from src group by value order by sum limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 2MR (applied to last RS)
+explain
+select value, sum(key) as sum from src group by value order by sum limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: value (type: string), key (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: double)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: double)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ value expressions: _col0 (type: string)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: double)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 20
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 20
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select value, sum(key) as sum from src group by value order by sum limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value, sum(key) as sum from src group by value order by sum limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_0 0.0
+val_10 10.0
+val_11 11.0
+val_12 24.0
+val_15 30.0
+val_17 17.0
+val_18 36.0
+val_19 19.0
+val_2 2.0
+val_20 20.0
+val_27 27.0
+val_28 28.0
+val_30 30.0
+val_33 33.0
+val_34 34.0
+val_4 4.0
+val_41 41.0
+val_5 15.0
+val_8 8.0
+val_9 9.0
+PREHOOK: query: -- subqueries
+explain
+select * from
+(select key, count(1) from src group by key order by key limit 2) subq
+join
+(select key, count(1) from src group by key limit 3) subq2
+on subq.key=subq2.key limit 4
+PREHOOK: type: QUERY
+POSTHOOK: query: -- subqueries
+explain
+select * from
+(select key, count(1) from src group by key order by key limit 2) subq
+join
+(select key, count(1) from src group by key limit 3) subq2
+on subq.key=subq2.key limit 4
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
+ Reducer 5 <- Map 4 (SIMPLE_EDGE)
+ Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 2
+ Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: _col0 is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 4
+ Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 5
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 3
+ Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ value expressions: _col0 (type: string), _col1 (type: bigint)
+ Reducer 6
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 3
+ Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: _col0 is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 4
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- map aggregation disabled
+explain
+select value, sum(key) as sum from src group by value order by value limit 20
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map aggregation disabled
+explain
+select value, sum(key) as sum from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: value (type: string), key (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 0.3
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: complete
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 20
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 20
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select value, sum(key) as sum from src group by value order by value limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select value, sum(key) as sum from src group by value order by value limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+val_0 0.0
+val_10 10.0
+val_100 200.0
+val_103 206.0
+val_104 208.0
+val_105 105.0
+val_11 11.0
+val_111 111.0
+val_113 226.0
+val_114 114.0
+val_116 116.0
+val_118 236.0
+val_119 357.0
+val_12 24.0
+val_120 240.0
+val_125 250.0
+val_126 126.0
+val_128 384.0
+val_129 258.0
+val_131 131.0
+PREHOOK: query: -- flush for order-by
+explain
+select key,value,value,value,value,value,value,value,value from src order by key limit 100
+PREHOOK: type: QUERY
+POSTHOOK: query: -- flush for order-by
+explain
+select key,value,value,value,value,value,value,value,value from src order by key limit 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 2.0E-5
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 100
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 100
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select key,value,value,value,value,value,value,value,value from src order by key limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key,value,value,value,value,value,value,value,value from src order by key limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0 val_0 val_0 val_0 val_0 val_0 val_0 val_0 val_0
+0 val_0 val_0 val_0 val_0 val_0 val_0 val_0 val_0
+0 val_0 val_0 val_0 val_0 val_0 val_0 val_0 val_0
+10 val_10 val_10 val_10 val_10 val_10 val_10 val_10 val_10
+100 val_100 val_100 val_100 val_100 val_100 val_100 val_100 val_100
+100 val_100 val_100 val_100 val_100 val_100 val_100 val_100 val_100
+103 val_103 val_103 val_103 val_103 val_103 val_103 val_103 val_103
+103 val_103 val_103 val_103 val_103 val_103 val_103 val_103 val_103
+104 val_104 val_104 val_104 val_104 val_104 val_104 val_104 val_104
+104 val_104 val_104 val_104 val_104 val_104 val_104 val_104 val_104
+105 val_105 val_105 val_105 val_105 val_105 val_105 val_105 val_105
+11 val_11 val_11 val_11 val_11 val_11 val_11 val_11 val_11
+111 val_111 val_111 val_111 val_111 val_111 val_111 val_111 val_111
+113 val_113 val_113 val_113 val_113 val_113 val_113 val_113 val_113
+113 val_113 val_113 val_113 val_113 val_113 val_113 val_113 val_113
+114 val_114 val_114 val_114 val_114 val_114 val_114 val_114 val_114
+116 val_116 val_116 val_116 val_116 val_116 val_116 val_116 val_116
+118 val_118 val_118 val_118 val_118 val_118 val_118 val_118 val_118
+118 val_118 val_118 val_118 val_118 val_118 val_118 val_118 val_118
+119 val_119 val_119 val_119 val_119 val_119 val_119 val_119 val_119
+119 val_119 val_119 val_119 val_119 val_119 val_119 val_119 val_119
+119 val_119 val_119 val_119 val_119 val_119 val_119 val_119 val_119
+12 val_12 val_12 val_12 val_12 val_12 val_12 val_12 val_12
+12 val_12 val_12 val_12 val_12 val_12 val_12 val_12 val_12
+120 val_120 val_120 val_120 val_120 val_120 val_120 val_120 val_120
+120 val_120 val_120 val_120 val_120 val_120 val_120 val_120 val_120
+125 val_125 val_125 val_125 val_125 val_125 val_125 val_125 val_125
+125 val_125 val_125 val_125 val_125 val_125 val_125 val_125 val_125
+126 val_126 val_126 val_126 val_126 val_126 val_126 val_126 val_126
+128 val_128 val_128 val_128 val_128 val_128 val_128 val_128 val_128
+128 val_128 val_128 val_128 val_128 val_128 val_128 val_128 val_128
+128 val_128 val_128 val_128 val_128 val_128 val_128 val_128 val_128
+129 val_129 val_129 val_129 val_129 val_129 val_129 val_129 val_129
+129 val_129 val_129 val_129 val_129 val_129 val_129 val_129 val_129
+131 val_131 val_131 val_131 val_131 val_131 val_131 val_131 val_131
+133 val_133 val_133 val_133 val_133 val_133 val_133 val_133 val_133
+134 val_134 val_134 val_134 val_134 val_134 val_134 val_134 val_134
+134 val_134 val_134 val_134 val_134 val_134 val_134 val_134 val_134
+136 val_136 val_136 val_136 val_136 val_136 val_136 val_136 val_136
+137 val_137 val_137 val_137 val_137 val_137 val_137 val_137 val_137
+137 val_137 val_137 val_137 val_137 val_137 val_137 val_137 val_137
+138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 val_138
+138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 val_138
+138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 val_138
+138 val_138 val_138 val_138 val_138 val_138 val_138 val_138 val_138
+143 val_143 val_143 val_143 val_143 val_143 val_143 val_143 val_143
+145 val_145 val_145 val_145 val_145 val_145 val_145 val_145 val_145
+146 val_146 val_146 val_146 val_146 val_146 val_146 val_146 val_146
+146 val_146 val_146 val_146 val_146 val_146 val_146 val_146 val_146
+149 val_149 val_149 val_149 val_149 val_149 val_149 val_149 val_149
+149 val_149 val_149 val_149 val_149 val_149 val_149 val_149 val_149
+15 val_15 val_15 val_15 val_15 val_15 val_15 val_15 val_15
+15 val_15 val_15 val_15 val_15 val_15 val_15 val_15 val_15
+150 val_150 val_150 val_150 val_150 val_150 val_150 val_150 val_150
+152 val_152 val_152 val_152 val_152 val_152 val_152 val_152 val_152
+152 val_152 val_152 val_152 val_152 val_152 val_152 val_152 val_152
+153 val_153 val_153 val_153 val_153 val_153 val_153 val_153 val_153
+155 val_155 val_155 val_155 val_155 val_155 val_155 val_155 val_155
+156 val_156 val_156 val_156 val_156 val_156 val_156 val_156 val_156
+157 val_157 val_157 val_157 val_157 val_157 val_157 val_157 val_157
+158 val_158 val_158 val_158 val_158 val_158 val_158 val_158 val_158
+160 val_160 val_160 val_160 val_160 val_160 val_160 val_160 val_160
+162 val_162 val_162 val_162 val_162 val_162 val_162 val_162 val_162
+163 val_163 val_163 val_163 val_163 val_163 val_163 val_163 val_163
+164 val_164 val_164 val_164 val_164 val_164 val_164 val_164 val_164
+164 val_164 val_164 val_164 val_164 val_164 val_164 val_164 val_164
+165 val_165 val_165 val_165 val_165 val_165 val_165 val_165 val_165
+165 val_165 val_165 val_165 val_165 val_165 val_165 val_165 val_165
+166 val_166 val_166 val_166 val_166 val_166 val_166 val_166 val_166
+167 val_167 val_167 val_167 val_167 val_167 val_167 val_167 val_167
+167 val_167 val_167 val_167 val_167 val_167 val_167 val_167 val_167
+167 val_167 val_167 val_167 val_167 val_167 val_167 val_167 val_167
+168 val_168 val_168 val_168 val_168 val_168 val_168 val_168 val_168
+169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 val_169
+169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 val_169
+169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 val_169
+169 val_169 val_169 val_169 val_169 val_169 val_169 val_169 val_169
+17 val_17 val_17 val_17 val_17 val_17 val_17 val_17 val_17
+170 val_170 val_170 val_170 val_170 val_170 val_170 val_170 val_170
+172 val_172 val_172 val_172 val_172 val_172 val_172 val_172 val_172
+172 val_172 val_172 val_172 val_172 val_172 val_172 val_172 val_172
+174 val_174 val_174 val_174 val_174 val_174 val_174 val_174 val_174
+174 val_174 val_174 val_174 val_174 val_174 val_174 val_174 val_174
+175 val_175 val_175 val_175 val_175 val_175 val_175 val_175 val_175
+175 val_175 val_175 val_175 val_175 val_175 val_175 val_175 val_175
+176 val_176 val_176 val_176 val_176 val_176 val_176 val_176 val_176
+176 val_176 val_176 val_176 val_176 val_176 val_176 val_176 val_176
+177 val_177 val_177 val_177 val_177 val_177 val_177 val_177 val_177
+178 val_178 val_178 val_178 val_178 val_178 val_178 val_178 val_178
+179 val_179 val_179 val_179 val_179 val_179 val_179 val_179 val_179
+179 val_179 val_179 val_179 val_179 val_179 val_179 val_179 val_179
+18 val_18 val_18 val_18 val_18 val_18 val_18 val_18 val_18
+18 val_18 val_18 val_18 val_18 val_18 val_18 val_18 val_18
+180 val_180 val_180 val_180 val_180 val_180 val_180 val_180 val_180
+181 val_181 val_181 val_181 val_181 val_181 val_181 val_181 val_181
+183 val_183 val_183 val_183 val_183 val_183 val_183 val_183 val_183
+186 val_186 val_186 val_186 val_186 val_186 val_186 val_186 val_186
+187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 val_187
+187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 val_187
+187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 val_187
+PREHOOK: query: -- flush for group-by
+explain
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
+PREHOOK: type: QUERY
+POSTHOOK: query: -- flush for group-by
+explain
+select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: concat(key, value, value, value, value, value, value, value, value, value) (type: string), key (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: complete
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: double)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: double)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ TopN Hash Memory Usage: 2.0E-5
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: double)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 100
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: 100
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0.0
+10.0
+102.0
+105.0
+105.0
+11.0
+111.0
+114.0
+116.0
+116.0
+126.0
+131.0
+133.0
+134.0
+136.0
+143.0
+144.0
+145.0
+15.0
+150.0
+152.0
+153.0
+155.0
+156.0
+157.0
+158.0
+160.0
+162.0
+163.0
+166.0
+166.0
+168.0
+168.0
+17.0
+170.0
+177.0
+178.0
+180.0
+181.0
+183.0
+186.0
+189.0
+19.0
+190.0
+190.0
+192.0
+194.0
+194.0
+196.0
+196.0
+2.0
+20.0
+200.0
+201.0
+202.0
+206.0
+208.0
+210.0
+214.0
+218.0
+222.0
+226.0
+226.0
+228.0
+24.0
+27.0
+28.0
+30.0
+30.0
+33.0
+34.0
+36.0
+4.0
+41.0
+43.0
+44.0
+47.0
+48.0
+52.0
+53.0
+54.0
+57.0
+64.0
+65.0
+66.0
+69.0
+74.0
+74.0
+77.0
+78.0
+8.0
+80.0
+82.0
+84.0
+85.0
+86.0
+87.0
+9.0
+92.0
+96.0
[39/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/bucket2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket2.q.out b/ql/src/test/results/clientpositive/llap/bucket2.q.out
new file mode 100644
index 0000000..a2cfc0a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucket2.q.out
@@ -0,0 +1,473 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket2_1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket2_1
+PREHOOK: query: explain extended
+insert overwrite table bucket2_1
+select * from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucket2_1
+select * from src
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_TABREF
+ TOK_TABNAME
+ src
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_TAB
+ TOK_TABNAME
+ bucket2_1
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_ALLCOLREF
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: string), _col1 (type: string)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: src
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src
+ name: default.src
+ Truncated Path -> Alias:
+ /src [src]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.bucket2_1
+ serialization.ddl struct bucket2_1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket2_1
+ TotalFiles: 2
+ GatherStats: true
+ MultiFileSpray: true
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.bucket2_1
+ serialization.ddl struct bucket2_1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket2_1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucket2_1
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket2_1
+POSTHOOK: query: insert overwrite table bucket2_1
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket2_1
+POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select * from bucket2_1 tablesample (bucket 1 out of 2) s
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from bucket2_1 tablesample (bucket 1 out of 2) s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: s
+ Filter Operator
+ predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean)
+ Select Operator
+ expressions: key (type: int), value (type: string)
+ outputColumnNames: _col0, _col1
+ ListSink
+
+PREHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket2_1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket2_1
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+104 val_104
+104 val_104
+114 val_114
+116 val_116
+118 val_118
+118 val_118
+12 val_12
+12 val_12
+120 val_120
+120 val_120
+126 val_126
+128 val_128
+128 val_128
+128 val_128
+134 val_134
+134 val_134
+136 val_136
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+146 val_146
+146 val_146
+150 val_150
+152 val_152
+152 val_152
+156 val_156
+158 val_158
+160 val_160
+162 val_162
+164 val_164
+164 val_164
+166 val_166
+168 val_168
+170 val_170
+172 val_172
+172 val_172
+174 val_174
+174 val_174
+176 val_176
+176 val_176
+178 val_178
+18 val_18
+18 val_18
+180 val_180
+186 val_186
+190 val_190
+192 val_192
+194 val_194
+196 val_196
+2 val_2
+20 val_20
+200 val_200
+200 val_200
+202 val_202
+208 val_208
+208 val_208
+208 val_208
+214 val_214
+216 val_216
+216 val_216
+218 val_218
+222 val_222
+224 val_224
+224 val_224
+226 val_226
+228 val_228
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+238 val_238
+238 val_238
+24 val_24
+24 val_24
+242 val_242
+242 val_242
+244 val_244
+248 val_248
+252 val_252
+256 val_256
+256 val_256
+258 val_258
+26 val_26
+26 val_26
+260 val_260
+262 val_262
+266 val_266
+272 val_272
+272 val_272
+274 val_274
+278 val_278
+278 val_278
+28 val_28
+280 val_280
+280 val_280
+282 val_282
+282 val_282
+284 val_284
+286 val_286
+288 val_288
+288 val_288
+292 val_292
+296 val_296
+298 val_298
+298 val_298
+298 val_298
+30 val_30
+302 val_302
+306 val_306
+308 val_308
+310 val_310
+316 val_316
+316 val_316
+316 val_316
+318 val_318
+318 val_318
+318 val_318
+322 val_322
+322 val_322
+332 val_332
+336 val_336
+338 val_338
+34 val_34
+342 val_342
+342 val_342
+344 val_344
+344 val_344
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+356 val_356
+360 val_360
+362 val_362
+364 val_364
+366 val_366
+368 val_368
+374 val_374
+378 val_378
+382 val_382
+382 val_382
+384 val_384
+384 val_384
+384 val_384
+386 val_386
+392 val_392
+394 val_394
+396 val_396
+396 val_396
+396 val_396
+4 val_4
+400 val_400
+402 val_402
+404 val_404
+404 val_404
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+414 val_414
+414 val_414
+418 val_418
+42 val_42
+42 val_42
+424 val_424
+424 val_424
+430 val_430
+430 val_430
+430 val_430
+432 val_432
+436 val_436
+438 val_438
+438 val_438
+438 val_438
+44 val_44
+444 val_444
+446 val_446
+448 val_448
+452 val_452
+454 val_454
+454 val_454
+454 val_454
+458 val_458
+458 val_458
+460 val_460
+462 val_462
+462 val_462
+466 val_466
+466 val_466
+466 val_466
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+470 val_470
+472 val_472
+478 val_478
+478 val_478
+480 val_480
+480 val_480
+480 val_480
+482 val_482
+484 val_484
+490 val_490
+492 val_492
+492 val_492
+494 val_494
+496 val_496
+498 val_498
+498 val_498
+498 val_498
+54 val_54
+58 val_58
+58 val_58
+64 val_64
+66 val_66
+70 val_70
+70 val_70
+70 val_70
+72 val_72
+72 val_72
+74 val_74
+76 val_76
+76 val_76
+78 val_78
+8 val_8
+80 val_80
+82 val_82
+84 val_84
+84 val_84
+86 val_86
+90 val_90
+90 val_90
+90 val_90
+92 val_92
+96 val_96
+98 val_98
+98 val_98
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/bucket3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket3.q.out b/ql/src/test/results/clientpositive/llap/bucket3.q.out
new file mode 100644
index 0000000..8c25962
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucket3.q.out
@@ -0,0 +1,498 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket3_1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket3_1
+PREHOOK: query: explain extended
+insert overwrite table bucket3_1 partition (ds='1')
+select * from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucket3_1 partition (ds='1')
+select * from src
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_TABREF
+ TOK_TABNAME
+ src
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_TAB
+ TOK_TABNAME
+ bucket3_1
+ TOK_PARTSPEC
+ TOK_PARTVAL
+ ds
+ '1'
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_ALLCOLREF
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: string), _col1 (type: string)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: src
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src
+ name: default.src
+ Truncated Path -> Alias:
+ /src [src]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 2
+ Static Partition Specification: ds=1/
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.bucket3_1
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket3_1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket3_1
+ TotalFiles: 2
+ GatherStats: true
+ MultiFileSpray: true
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 1
+ replace: true
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.bucket3_1
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket3_1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket3_1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1')
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket3_1@ds=1
+POSTHOOK: query: insert overwrite table bucket3_1 partition (ds='1')
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket3_1@ds=1
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table bucket3_1 partition (ds='2')
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket3_1@ds=2
+POSTHOOK: query: insert overwrite table bucket3_1 partition (ds='2')
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket3_1@ds=2
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket3_1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: s
+ Filter Operator
+ predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean)
+ Select Operator
+ expressions: key (type: int), value (type: string), '1' (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ ListSink
+
+PREHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket3_1
+PREHOOK: Input: default@bucket3_1@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket3_1
+POSTHOOK: Input: default@bucket3_1@ds=1
+#### A masked pattern was here ####
+0 val_0 1
+0 val_0 1
+0 val_0 1
+10 val_10 1
+100 val_100 1
+100 val_100 1
+104 val_104 1
+104 val_104 1
+114 val_114 1
+116 val_116 1
+118 val_118 1
+118 val_118 1
+12 val_12 1
+12 val_12 1
+120 val_120 1
+120 val_120 1
+126 val_126 1
+128 val_128 1
+128 val_128 1
+128 val_128 1
+134 val_134 1
+134 val_134 1
+136 val_136 1
+138 val_138 1
+138 val_138 1
+138 val_138 1
+138 val_138 1
+146 val_146 1
+146 val_146 1
+150 val_150 1
+152 val_152 1
+152 val_152 1
+156 val_156 1
+158 val_158 1
+160 val_160 1
+162 val_162 1
+164 val_164 1
+164 val_164 1
+166 val_166 1
+168 val_168 1
+170 val_170 1
+172 val_172 1
+172 val_172 1
+174 val_174 1
+174 val_174 1
+176 val_176 1
+176 val_176 1
+178 val_178 1
+18 val_18 1
+18 val_18 1
+180 val_180 1
+186 val_186 1
+190 val_190 1
+192 val_192 1
+194 val_194 1
+196 val_196 1
+2 val_2 1
+20 val_20 1
+200 val_200 1
+200 val_200 1
+202 val_202 1
+208 val_208 1
+208 val_208 1
+208 val_208 1
+214 val_214 1
+216 val_216 1
+216 val_216 1
+218 val_218 1
+222 val_222 1
+224 val_224 1
+224 val_224 1
+226 val_226 1
+228 val_228 1
+230 val_230 1
+230 val_230 1
+230 val_230 1
+230 val_230 1
+230 val_230 1
+238 val_238 1
+238 val_238 1
+24 val_24 1
+24 val_24 1
+242 val_242 1
+242 val_242 1
+244 val_244 1
+248 val_248 1
+252 val_252 1
+256 val_256 1
+256 val_256 1
+258 val_258 1
+26 val_26 1
+26 val_26 1
+260 val_260 1
+262 val_262 1
+266 val_266 1
+272 val_272 1
+272 val_272 1
+274 val_274 1
+278 val_278 1
+278 val_278 1
+28 val_28 1
+280 val_280 1
+280 val_280 1
+282 val_282 1
+282 val_282 1
+284 val_284 1
+286 val_286 1
+288 val_288 1
+288 val_288 1
+292 val_292 1
+296 val_296 1
+298 val_298 1
+298 val_298 1
+298 val_298 1
+30 val_30 1
+302 val_302 1
+306 val_306 1
+308 val_308 1
+310 val_310 1
+316 val_316 1
+316 val_316 1
+316 val_316 1
+318 val_318 1
+318 val_318 1
+318 val_318 1
+322 val_322 1
+322 val_322 1
+332 val_332 1
+336 val_336 1
+338 val_338 1
+34 val_34 1
+342 val_342 1
+342 val_342 1
+344 val_344 1
+344 val_344 1
+348 val_348 1
+348 val_348 1
+348 val_348 1
+348 val_348 1
+348 val_348 1
+356 val_356 1
+360 val_360 1
+362 val_362 1
+364 val_364 1
+366 val_366 1
+368 val_368 1
+374 val_374 1
+378 val_378 1
+382 val_382 1
+382 val_382 1
+384 val_384 1
+384 val_384 1
+384 val_384 1
+386 val_386 1
+392 val_392 1
+394 val_394 1
+396 val_396 1
+396 val_396 1
+396 val_396 1
+4 val_4 1
+400 val_400 1
+402 val_402 1
+404 val_404 1
+404 val_404 1
+406 val_406 1
+406 val_406 1
+406 val_406 1
+406 val_406 1
+414 val_414 1
+414 val_414 1
+418 val_418 1
+42 val_42 1
+42 val_42 1
+424 val_424 1
+424 val_424 1
+430 val_430 1
+430 val_430 1
+430 val_430 1
+432 val_432 1
+436 val_436 1
+438 val_438 1
+438 val_438 1
+438 val_438 1
+44 val_44 1
+444 val_444 1
+446 val_446 1
+448 val_448 1
+452 val_452 1
+454 val_454 1
+454 val_454 1
+454 val_454 1
+458 val_458 1
+458 val_458 1
+460 val_460 1
+462 val_462 1
+462 val_462 1
+466 val_466 1
+466 val_466 1
+466 val_466 1
+468 val_468 1
+468 val_468 1
+468 val_468 1
+468 val_468 1
+470 val_470 1
+472 val_472 1
+478 val_478 1
+478 val_478 1
+480 val_480 1
+480 val_480 1
+480 val_480 1
+482 val_482 1
+484 val_484 1
+490 val_490 1
+492 val_492 1
+492 val_492 1
+494 val_494 1
+496 val_496 1
+498 val_498 1
+498 val_498 1
+498 val_498 1
+54 val_54 1
+58 val_58 1
+58 val_58 1
+64 val_64 1
+66 val_66 1
+70 val_70 1
+70 val_70 1
+70 val_70 1
+72 val_72 1
+72 val_72 1
+74 val_74 1
+76 val_76 1
+76 val_76 1
+78 val_78 1
+8 val_8 1
+80 val_80 1
+82 val_82 1
+84 val_84 1
+84 val_84 1
+86 val_86 1
+90 val_90 1
+90 val_90 1
+90 val_90 1
+92 val_92 1
+96 val_96 1
+98 val_98 1
+98 val_98 1
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/bucket4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket4.q.out b/ql/src/test/results/clientpositive/llap/bucket4.q.out
new file mode 100644
index 0000000..c681933
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucket4.q.out
@@ -0,0 +1,472 @@
+PREHOOK: query: CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket4_1
+POSTHOOK: query: CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket4_1
+PREHOOK: query: explain extended
+insert overwrite table bucket4_1
+select * from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucket4_1
+select * from src
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_TABREF
+ TOK_TABNAME
+ src
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_TAB
+ TOK_TABNAME
+ bucket4_1
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_ALLCOLREF
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: UDFToInteger(_col0) (type: int)
+ sort order: +
+ Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: string), _col1 (type: string)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: src
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src
+ name: default.src
+ Truncated Path -> Alias:
+ /src [src]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.bucket4_1
+ serialization.ddl struct bucket4_1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket4_1
+ TotalFiles: 2
+ GatherStats: true
+ MultiFileSpray: true
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.bucket4_1
+ serialization.ddl struct bucket4_1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket4_1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucket4_1
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket4_1
+POSTHOOK: query: insert overwrite table bucket4_1
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket4_1
+POSTHOOK: Lineage: bucket4_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket4_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select * from bucket4_1 tablesample (bucket 1 out of 2) s
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from bucket4_1 tablesample (bucket 1 out of 2) s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ TableScan
+ alias: s
+ Filter Operator
+ predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean)
+ Select Operator
+ expressions: key (type: int), value (type: string)
+ outputColumnNames: _col0, _col1
+ ListSink
+
+PREHOOK: query: select * from bucket4_1 tablesample (bucket 1 out of 2) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket4_1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from bucket4_1 tablesample (bucket 1 out of 2) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket4_1
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+2 val_2
+4 val_4
+8 val_8
+10 val_10
+12 val_12
+12 val_12
+18 val_18
+18 val_18
+20 val_20
+24 val_24
+24 val_24
+26 val_26
+26 val_26
+28 val_28
+30 val_30
+34 val_34
+42 val_42
+42 val_42
+44 val_44
+54 val_54
+58 val_58
+58 val_58
+64 val_64
+66 val_66
+70 val_70
+70 val_70
+70 val_70
+72 val_72
+72 val_72
+74 val_74
+76 val_76
+76 val_76
+78 val_78
+80 val_80
+82 val_82
+84 val_84
+84 val_84
+86 val_86
+90 val_90
+90 val_90
+90 val_90
+92 val_92
+96 val_96
+98 val_98
+98 val_98
+100 val_100
+100 val_100
+104 val_104
+104 val_104
+114 val_114
+116 val_116
+118 val_118
+118 val_118
+120 val_120
+120 val_120
+126 val_126
+128 val_128
+128 val_128
+128 val_128
+134 val_134
+134 val_134
+136 val_136
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+146 val_146
+146 val_146
+150 val_150
+152 val_152
+152 val_152
+156 val_156
+158 val_158
+160 val_160
+162 val_162
+164 val_164
+164 val_164
+166 val_166
+168 val_168
+170 val_170
+172 val_172
+172 val_172
+174 val_174
+174 val_174
+176 val_176
+176 val_176
+178 val_178
+180 val_180
+186 val_186
+190 val_190
+192 val_192
+194 val_194
+196 val_196
+200 val_200
+200 val_200
+202 val_202
+208 val_208
+208 val_208
+208 val_208
+214 val_214
+216 val_216
+216 val_216
+218 val_218
+222 val_222
+224 val_224
+224 val_224
+226 val_226
+228 val_228
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+238 val_238
+238 val_238
+242 val_242
+242 val_242
+244 val_244
+248 val_248
+252 val_252
+256 val_256
+256 val_256
+258 val_258
+260 val_260
+262 val_262
+266 val_266
+272 val_272
+272 val_272
+274 val_274
+278 val_278
+278 val_278
+280 val_280
+280 val_280
+282 val_282
+282 val_282
+284 val_284
+286 val_286
+288 val_288
+288 val_288
+292 val_292
+296 val_296
+298 val_298
+298 val_298
+298 val_298
+302 val_302
+306 val_306
+308 val_308
+310 val_310
+316 val_316
+316 val_316
+316 val_316
+318 val_318
+318 val_318
+318 val_318
+322 val_322
+322 val_322
+332 val_332
+336 val_336
+338 val_338
+342 val_342
+342 val_342
+344 val_344
+344 val_344
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+356 val_356
+360 val_360
+362 val_362
+364 val_364
+366 val_366
+368 val_368
+374 val_374
+378 val_378
+382 val_382
+382 val_382
+384 val_384
+384 val_384
+384 val_384
+386 val_386
+392 val_392
+394 val_394
+396 val_396
+396 val_396
+396 val_396
+400 val_400
+402 val_402
+404 val_404
+404 val_404
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+414 val_414
+414 val_414
+418 val_418
+424 val_424
+424 val_424
+430 val_430
+430 val_430
+430 val_430
+432 val_432
+436 val_436
+438 val_438
+438 val_438
+438 val_438
+444 val_444
+446 val_446
+448 val_448
+452 val_452
+454 val_454
+454 val_454
+454 val_454
+458 val_458
+458 val_458
+460 val_460
+462 val_462
+462 val_462
+466 val_466
+466 val_466
+466 val_466
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+470 val_470
+472 val_472
+478 val_478
+478 val_478
+480 val_480
+480 val_480
+480 val_480
+482 val_482
+484 val_484
+490 val_490
+492 val_492
+492 val_492
+494 val_494
+496 val_496
+498 val_498
+498 val_498
+498 val_498
[50/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_join21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_join21.q.out b/ql/src/test/results/clientpositive/llap/auto_join21.q.out
new file mode 100644
index 0000000..5ec385c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_join21.q.out
@@ -0,0 +1,615 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key > 10) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: src3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ Right Outer Join1 to 2
+ filter predicates:
+ 0 {(KEY.reducesinkkey0 < 10)}
+ 1
+ 2 {(KEY.reducesinkkey0 < 10)}
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+ sort order: ++++++
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+NULL NULL NULL NULL 0 val_0
+NULL NULL NULL NULL 0 val_0
+NULL NULL NULL NULL 0 val_0
+NULL NULL NULL NULL 10 val_10
+NULL NULL NULL NULL 100 val_100
+NULL NULL NULL NULL 100 val_100
+NULL NULL NULL NULL 103 val_103
+NULL NULL NULL NULL 103 val_103
+NULL NULL NULL NULL 104 val_104
+NULL NULL NULL NULL 104 val_104
+NULL NULL NULL NULL 105 val_105
+NULL NULL NULL NULL 11 val_11
+NULL NULL NULL NULL 111 val_111
+NULL NULL NULL NULL 113 val_113
+NULL NULL NULL NULL 113 val_113
+NULL NULL NULL NULL 114 val_114
+NULL NULL NULL NULL 116 val_116
+NULL NULL NULL NULL 118 val_118
+NULL NULL NULL NULL 118 val_118
+NULL NULL NULL NULL 119 val_119
+NULL NULL NULL NULL 119 val_119
+NULL NULL NULL NULL 119 val_119
+NULL NULL NULL NULL 12 val_12
+NULL NULL NULL NULL 12 val_12
+NULL NULL NULL NULL 120 val_120
+NULL NULL NULL NULL 120 val_120
+NULL NULL NULL NULL 125 val_125
+NULL NULL NULL NULL 125 val_125
+NULL NULL NULL NULL 126 val_126
+NULL NULL NULL NULL 128 val_128
+NULL NULL NULL NULL 128 val_128
+NULL NULL NULL NULL 128 val_128
+NULL NULL NULL NULL 129 val_129
+NULL NULL NULL NULL 129 val_129
+NULL NULL NULL NULL 131 val_131
+NULL NULL NULL NULL 133 val_133
+NULL NULL NULL NULL 134 val_134
+NULL NULL NULL NULL 134 val_134
+NULL NULL NULL NULL 136 val_136
+NULL NULL NULL NULL 137 val_137
+NULL NULL NULL NULL 137 val_137
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 143 val_143
+NULL NULL NULL NULL 145 val_145
+NULL NULL NULL NULL 146 val_146
+NULL NULL NULL NULL 146 val_146
+NULL NULL NULL NULL 149 val_149
+NULL NULL NULL NULL 149 val_149
+NULL NULL NULL NULL 15 val_15
+NULL NULL NULL NULL 15 val_15
+NULL NULL NULL NULL 150 val_150
+NULL NULL NULL NULL 152 val_152
+NULL NULL NULL NULL 152 val_152
+NULL NULL NULL NULL 153 val_153
+NULL NULL NULL NULL 155 val_155
+NULL NULL NULL NULL 156 val_156
+NULL NULL NULL NULL 157 val_157
+NULL NULL NULL NULL 158 val_158
+NULL NULL NULL NULL 160 val_160
+NULL NULL NULL NULL 162 val_162
+NULL NULL NULL NULL 163 val_163
+NULL NULL NULL NULL 164 val_164
+NULL NULL NULL NULL 164 val_164
+NULL NULL NULL NULL 165 val_165
+NULL NULL NULL NULL 165 val_165
+NULL NULL NULL NULL 166 val_166
+NULL NULL NULL NULL 167 val_167
+NULL NULL NULL NULL 167 val_167
+NULL NULL NULL NULL 167 val_167
+NULL NULL NULL NULL 168 val_168
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 17 val_17
+NULL NULL NULL NULL 170 val_170
+NULL NULL NULL NULL 172 val_172
+NULL NULL NULL NULL 172 val_172
+NULL NULL NULL NULL 174 val_174
+NULL NULL NULL NULL 174 val_174
+NULL NULL NULL NULL 175 val_175
+NULL NULL NULL NULL 175 val_175
+NULL NULL NULL NULL 176 val_176
+NULL NULL NULL NULL 176 val_176
+NULL NULL NULL NULL 177 val_177
+NULL NULL NULL NULL 178 val_178
+NULL NULL NULL NULL 179 val_179
+NULL NULL NULL NULL 179 val_179
+NULL NULL NULL NULL 18 val_18
+NULL NULL NULL NULL 18 val_18
+NULL NULL NULL NULL 180 val_180
+NULL NULL NULL NULL 181 val_181
+NULL NULL NULL NULL 183 val_183
+NULL NULL NULL NULL 186 val_186
+NULL NULL NULL NULL 187 val_187
+NULL NULL NULL NULL 187 val_187
+NULL NULL NULL NULL 187 val_187
+NULL NULL NULL NULL 189 val_189
+NULL NULL NULL NULL 19 val_19
+NULL NULL NULL NULL 190 val_190
+NULL NULL NULL NULL 191 val_191
+NULL NULL NULL NULL 191 val_191
+NULL NULL NULL NULL 192 val_192
+NULL NULL NULL NULL 193 val_193
+NULL NULL NULL NULL 193 val_193
+NULL NULL NULL NULL 193 val_193
+NULL NULL NULL NULL 194 val_194
+NULL NULL NULL NULL 195 val_195
+NULL NULL NULL NULL 195 val_195
+NULL NULL NULL NULL 196 val_196
+NULL NULL NULL NULL 197 val_197
+NULL NULL NULL NULL 197 val_197
+NULL NULL NULL NULL 199 val_199
+NULL NULL NULL NULL 199 val_199
+NULL NULL NULL NULL 199 val_199
+NULL NULL NULL NULL 2 val_2
+NULL NULL NULL NULL 20 val_20
+NULL NULL NULL NULL 200 val_200
+NULL NULL NULL NULL 200 val_200
+NULL NULL NULL NULL 201 val_201
+NULL NULL NULL NULL 202 val_202
+NULL NULL NULL NULL 203 val_203
+NULL NULL NULL NULL 203 val_203
+NULL NULL NULL NULL 205 val_205
+NULL NULL NULL NULL 205 val_205
+NULL NULL NULL NULL 207 val_207
+NULL NULL NULL NULL 207 val_207
+NULL NULL NULL NULL 208 val_208
+NULL NULL NULL NULL 208 val_208
+NULL NULL NULL NULL 208 val_208
+NULL NULL NULL NULL 209 val_209
+NULL NULL NULL NULL 209 val_209
+NULL NULL NULL NULL 213 val_213
+NULL NULL NULL NULL 213 val_213
+NULL NULL NULL NULL 214 val_214
+NULL NULL NULL NULL 216 val_216
+NULL NULL NULL NULL 216 val_216
+NULL NULL NULL NULL 217 val_217
+NULL NULL NULL NULL 217 val_217
+NULL NULL NULL NULL 218 val_218
+NULL NULL NULL NULL 219 val_219
+NULL NULL NULL NULL 219 val_219
+NULL NULL NULL NULL 221 val_221
+NULL NULL NULL NULL 221 val_221
+NULL NULL NULL NULL 222 val_222
+NULL NULL NULL NULL 223 val_223
+NULL NULL NULL NULL 223 val_223
+NULL NULL NULL NULL 224 val_224
+NULL NULL NULL NULL 224 val_224
+NULL NULL NULL NULL 226 val_226
+NULL NULL NULL NULL 228 val_228
+NULL NULL NULL NULL 229 val_229
+NULL NULL NULL NULL 229 val_229
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 233 val_233
+NULL NULL NULL NULL 233 val_233
+NULL NULL NULL NULL 235 val_235
+NULL NULL NULL NULL 237 val_237
+NULL NULL NULL NULL 237 val_237
+NULL NULL NULL NULL 238 val_238
+NULL NULL NULL NULL 238 val_238
+NULL NULL NULL NULL 239 val_239
+NULL NULL NULL NULL 239 val_239
+NULL NULL NULL NULL 24 val_24
+NULL NULL NULL NULL 24 val_24
+NULL NULL NULL NULL 241 val_241
+NULL NULL NULL NULL 242 val_242
+NULL NULL NULL NULL 242 val_242
+NULL NULL NULL NULL 244 val_244
+NULL NULL NULL NULL 247 val_247
+NULL NULL NULL NULL 248 val_248
+NULL NULL NULL NULL 249 val_249
+NULL NULL NULL NULL 252 val_252
+NULL NULL NULL NULL 255 val_255
+NULL NULL NULL NULL 255 val_255
+NULL NULL NULL NULL 256 val_256
+NULL NULL NULL NULL 256 val_256
+NULL NULL NULL NULL 257 val_257
+NULL NULL NULL NULL 258 val_258
+NULL NULL NULL NULL 26 val_26
+NULL NULL NULL NULL 26 val_26
+NULL NULL NULL NULL 260 val_260
+NULL NULL NULL NULL 262 val_262
+NULL NULL NULL NULL 263 val_263
+NULL NULL NULL NULL 265 val_265
+NULL NULL NULL NULL 265 val_265
+NULL NULL NULL NULL 266 val_266
+NULL NULL NULL NULL 27 val_27
+NULL NULL NULL NULL 272 val_272
+NULL NULL NULL NULL 272 val_272
+NULL NULL NULL NULL 273 val_273
+NULL NULL NULL NULL 273 val_273
+NULL NULL NULL NULL 273 val_273
+NULL NULL NULL NULL 274 val_274
+NULL NULL NULL NULL 275 val_275
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 278 val_278
+NULL NULL NULL NULL 278 val_278
+NULL NULL NULL NULL 28 val_28
+NULL NULL NULL NULL 280 val_280
+NULL NULL NULL NULL 280 val_280
+NULL NULL NULL NULL 281 val_281
+NULL NULL NULL NULL 281 val_281
+NULL NULL NULL NULL 282 val_282
+NULL NULL NULL NULL 282 val_282
+NULL NULL NULL NULL 283 val_283
+NULL NULL NULL NULL 284 val_284
+NULL NULL NULL NULL 285 val_285
+NULL NULL NULL NULL 286 val_286
+NULL NULL NULL NULL 287 val_287
+NULL NULL NULL NULL 288 val_288
+NULL NULL NULL NULL 288 val_288
+NULL NULL NULL NULL 289 val_289
+NULL NULL NULL NULL 291 val_291
+NULL NULL NULL NULL 292 val_292
+NULL NULL NULL NULL 296 val_296
+NULL NULL NULL NULL 298 val_298
+NULL NULL NULL NULL 298 val_298
+NULL NULL NULL NULL 298 val_298
+NULL NULL NULL NULL 30 val_30
+NULL NULL NULL NULL 302 val_302
+NULL NULL NULL NULL 305 val_305
+NULL NULL NULL NULL 306 val_306
+NULL NULL NULL NULL 307 val_307
+NULL NULL NULL NULL 307 val_307
+NULL NULL NULL NULL 308 val_308
+NULL NULL NULL NULL 309 val_309
+NULL NULL NULL NULL 309 val_309
+NULL NULL NULL NULL 310 val_310
+NULL NULL NULL NULL 311 val_311
+NULL NULL NULL NULL 311 val_311
+NULL NULL NULL NULL 311 val_311
+NULL NULL NULL NULL 315 val_315
+NULL NULL NULL NULL 316 val_316
+NULL NULL NULL NULL 316 val_316
+NULL NULL NULL NULL 316 val_316
+NULL NULL NULL NULL 317 val_317
+NULL NULL NULL NULL 317 val_317
+NULL NULL NULL NULL 318 val_318
+NULL NULL NULL NULL 318 val_318
+NULL NULL NULL NULL 318 val_318
+NULL NULL NULL NULL 321 val_321
+NULL NULL NULL NULL 321 val_321
+NULL NULL NULL NULL 322 val_322
+NULL NULL NULL NULL 322 val_322
+NULL NULL NULL NULL 323 val_323
+NULL NULL NULL NULL 325 val_325
+NULL NULL NULL NULL 325 val_325
+NULL NULL NULL NULL 327 val_327
+NULL NULL NULL NULL 327 val_327
+NULL NULL NULL NULL 327 val_327
+NULL NULL NULL NULL 33 val_33
+NULL NULL NULL NULL 331 val_331
+NULL NULL NULL NULL 331 val_331
+NULL NULL NULL NULL 332 val_332
+NULL NULL NULL NULL 333 val_333
+NULL NULL NULL NULL 333 val_333
+NULL NULL NULL NULL 335 val_335
+NULL NULL NULL NULL 336 val_336
+NULL NULL NULL NULL 338 val_338
+NULL NULL NULL NULL 339 val_339
+NULL NULL NULL NULL 34 val_34
+NULL NULL NULL NULL 341 val_341
+NULL NULL NULL NULL 342 val_342
+NULL NULL NULL NULL 342 val_342
+NULL NULL NULL NULL 344 val_344
+NULL NULL NULL NULL 344 val_344
+NULL NULL NULL NULL 345 val_345
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 35 val_35
+NULL NULL NULL NULL 35 val_35
+NULL NULL NULL NULL 35 val_35
+NULL NULL NULL NULL 351 val_351
+NULL NULL NULL NULL 353 val_353
+NULL NULL NULL NULL 353 val_353
+NULL NULL NULL NULL 356 val_356
+NULL NULL NULL NULL 360 val_360
+NULL NULL NULL NULL 362 val_362
+NULL NULL NULL NULL 364 val_364
+NULL NULL NULL NULL 365 val_365
+NULL NULL NULL NULL 366 val_366
+NULL NULL NULL NULL 367 val_367
+NULL NULL NULL NULL 367 val_367
+NULL NULL NULL NULL 368 val_368
+NULL NULL NULL NULL 369 val_369
+NULL NULL NULL NULL 369 val_369
+NULL NULL NULL NULL 369 val_369
+NULL NULL NULL NULL 37 val_37
+NULL NULL NULL NULL 37 val_37
+NULL NULL NULL NULL 373 val_373
+NULL NULL NULL NULL 374 val_374
+NULL NULL NULL NULL 375 val_375
+NULL NULL NULL NULL 377 val_377
+NULL NULL NULL NULL 378 val_378
+NULL NULL NULL NULL 379 val_379
+NULL NULL NULL NULL 382 val_382
+NULL NULL NULL NULL 382 val_382
+NULL NULL NULL NULL 384 val_384
+NULL NULL NULL NULL 384 val_384
+NULL NULL NULL NULL 384 val_384
+NULL NULL NULL NULL 386 val_386
+NULL NULL NULL NULL 389 val_389
+NULL NULL NULL NULL 392 val_392
+NULL NULL NULL NULL 393 val_393
+NULL NULL NULL NULL 394 val_394
+NULL NULL NULL NULL 395 val_395
+NULL NULL NULL NULL 395 val_395
+NULL NULL NULL NULL 396 val_396
+NULL NULL NULL NULL 396 val_396
+NULL NULL NULL NULL 396 val_396
+NULL NULL NULL NULL 397 val_397
+NULL NULL NULL NULL 397 val_397
+NULL NULL NULL NULL 399 val_399
+NULL NULL NULL NULL 399 val_399
+NULL NULL NULL NULL 4 val_4
+NULL NULL NULL NULL 400 val_400
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 402 val_402
+NULL NULL NULL NULL 403 val_403
+NULL NULL NULL NULL 403 val_403
+NULL NULL NULL NULL 403 val_403
+NULL NULL NULL NULL 404 val_404
+NULL NULL NULL NULL 404 val_404
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 407 val_407
+NULL NULL NULL NULL 409 val_409
+NULL NULL NULL NULL 409 val_409
+NULL NULL NULL NULL 409 val_409
+NULL NULL NULL NULL 41 val_41
+NULL NULL NULL NULL 411 val_411
+NULL NULL NULL NULL 413 val_413
+NULL NULL NULL NULL 413 val_413
+NULL NULL NULL NULL 414 val_414
+NULL NULL NULL NULL 414 val_414
+NULL NULL NULL NULL 417 val_417
+NULL NULL NULL NULL 417 val_417
+NULL NULL NULL NULL 417 val_417
+NULL NULL NULL NULL 418 val_418
+NULL NULL NULL NULL 419 val_419
+NULL NULL NULL NULL 42 val_42
+NULL NULL NULL NULL 42 val_42
+NULL NULL NULL NULL 421 val_421
+NULL NULL NULL NULL 424 val_424
+NULL NULL NULL NULL 424 val_424
+NULL NULL NULL NULL 427 val_427
+NULL NULL NULL NULL 429 val_429
+NULL NULL NULL NULL 429 val_429
+NULL NULL NULL NULL 43 val_43
+NULL NULL NULL NULL 430 val_430
+NULL NULL NULL NULL 430 val_430
+NULL NULL NULL NULL 430 val_430
+NULL NULL NULL NULL 431 val_431
+NULL NULL NULL NULL 431 val_431
+NULL NULL NULL NULL 431 val_431
+NULL NULL NULL NULL 432 val_432
+NULL NULL NULL NULL 435 val_435
+NULL NULL NULL NULL 436 val_436
+NULL NULL NULL NULL 437 val_437
+NULL NULL NULL NULL 438 val_438
+NULL NULL NULL NULL 438 val_438
+NULL NULL NULL NULL 438 val_438
+NULL NULL NULL NULL 439 val_439
+NULL NULL NULL NULL 439 val_439
+NULL NULL NULL NULL 44 val_44
+NULL NULL NULL NULL 443 val_443
+NULL NULL NULL NULL 444 val_444
+NULL NULL NULL NULL 446 val_446
+NULL NULL NULL NULL 448 val_448
+NULL NULL NULL NULL 449 val_449
+NULL NULL NULL NULL 452 val_452
+NULL NULL NULL NULL 453 val_453
+NULL NULL NULL NULL 454 val_454
+NULL NULL NULL NULL 454 val_454
+NULL NULL NULL NULL 454 val_454
+NULL NULL NULL NULL 455 val_455
+NULL NULL NULL NULL 457 val_457
+NULL NULL NULL NULL 458 val_458
+NULL NULL NULL NULL 458 val_458
+NULL NULL NULL NULL 459 val_459
+NULL NULL NULL NULL 459 val_459
+NULL NULL NULL NULL 460 val_460
+NULL NULL NULL NULL 462 val_462
+NULL NULL NULL NULL 462 val_462
+NULL NULL NULL NULL 463 val_463
+NULL NULL NULL NULL 463 val_463
+NULL NULL NULL NULL 466 val_466
+NULL NULL NULL NULL 466 val_466
+NULL NULL NULL NULL 466 val_466
+NULL NULL NULL NULL 467 val_467
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 47 val_47
+NULL NULL NULL NULL 470 val_470
+NULL NULL NULL NULL 472 val_472
+NULL NULL NULL NULL 475 val_475
+NULL NULL NULL NULL 477 val_477
+NULL NULL NULL NULL 478 val_478
+NULL NULL NULL NULL 478 val_478
+NULL NULL NULL NULL 479 val_479
+NULL NULL NULL NULL 480 val_480
+NULL NULL NULL NULL 480 val_480
+NULL NULL NULL NULL 480 val_480
+NULL NULL NULL NULL 481 val_481
+NULL NULL NULL NULL 482 val_482
+NULL NULL NULL NULL 483 val_483
+NULL NULL NULL NULL 484 val_484
+NULL NULL NULL NULL 485 val_485
+NULL NULL NULL NULL 487 val_487
+NULL NULL NULL NULL 489 val_489
+NULL NULL NULL NULL 489 val_489
+NULL NULL NULL NULL 489 val_489
+NULL NULL NULL NULL 489 val_489
+NULL NULL NULL NULL 490 val_490
+NULL NULL NULL NULL 491 val_491
+NULL NULL NULL NULL 492 val_492
+NULL NULL NULL NULL 492 val_492
+NULL NULL NULL NULL 493 val_493
+NULL NULL NULL NULL 494 val_494
+NULL NULL NULL NULL 495 val_495
+NULL NULL NULL NULL 496 val_496
+NULL NULL NULL NULL 497 val_497
+NULL NULL NULL NULL 498 val_498
+NULL NULL NULL NULL 498 val_498
+NULL NULL NULL NULL 498 val_498
+NULL NULL NULL NULL 5 val_5
+NULL NULL NULL NULL 5 val_5
+NULL NULL NULL NULL 5 val_5
+NULL NULL NULL NULL 51 val_51
+NULL NULL NULL NULL 51 val_51
+NULL NULL NULL NULL 53 val_53
+NULL NULL NULL NULL 54 val_54
+NULL NULL NULL NULL 57 val_57
+NULL NULL NULL NULL 58 val_58
+NULL NULL NULL NULL 58 val_58
+NULL NULL NULL NULL 64 val_64
+NULL NULL NULL NULL 65 val_65
+NULL NULL NULL NULL 66 val_66
+NULL NULL NULL NULL 67 val_67
+NULL NULL NULL NULL 67 val_67
+NULL NULL NULL NULL 69 val_69
+NULL NULL NULL NULL 70 val_70
+NULL NULL NULL NULL 70 val_70
+NULL NULL NULL NULL 70 val_70
+NULL NULL NULL NULL 72 val_72
+NULL NULL NULL NULL 72 val_72
+NULL NULL NULL NULL 74 val_74
+NULL NULL NULL NULL 76 val_76
+NULL NULL NULL NULL 76 val_76
+NULL NULL NULL NULL 77 val_77
+NULL NULL NULL NULL 78 val_78
+NULL NULL NULL NULL 8 val_8
+NULL NULL NULL NULL 80 val_80
+NULL NULL NULL NULL 82 val_82
+NULL NULL NULL NULL 83 val_83
+NULL NULL NULL NULL 83 val_83
+NULL NULL NULL NULL 84 val_84
+NULL NULL NULL NULL 84 val_84
+NULL NULL NULL NULL 85 val_85
+NULL NULL NULL NULL 86 val_86
+NULL NULL NULL NULL 87 val_87
+NULL NULL NULL NULL 9 val_9
+NULL NULL NULL NULL 90 val_90
+NULL NULL NULL NULL 90 val_90
+NULL NULL NULL NULL 90 val_90
+NULL NULL NULL NULL 92 val_92
+NULL NULL NULL NULL 95 val_95
+NULL NULL NULL NULL 95 val_95
+NULL NULL NULL NULL 96 val_96
+NULL NULL NULL NULL 97 val_97
+NULL NULL NULL NULL 97 val_97
+NULL NULL NULL NULL 98 val_98
+NULL NULL NULL NULL 98 val_98
[05/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/parallel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/parallel.q.out b/ql/src/test/results/clientpositive/llap/parallel.q.out
new file mode 100644
index 0000000..eb00a72
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/parallel.q.out
@@ -0,0 +1,1444 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table if not exists src_a like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_a
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table if not exists src_a like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_a
+PREHOOK: query: create table if not exists src_b like src
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_b
+POSTHOOK: query: create table if not exists src_b like src
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_b
+PREHOOK: query: explain
+from (select key, value from src group by key, value) s
+insert overwrite table src_a select s.key, s.value group by s.key, s.value
+insert overwrite table src_b select s.key, s.value group by s.key, s.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from (select key, value from src group by key, value) s
+insert overwrite table src_a select s.key, s.value group by s.key, s.value
+insert overwrite table src_b select s.key, s.value group by s.key, s.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-3 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-3
+ Stage-4 depends on stages: Stage-0
+ Stage-1 depends on stages: Stage-3
+ Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: key, value
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: key (type: string), value (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string), KEY._col1 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Forward
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: KEY._col0 (type: string), KEY._col1 (type: string)
+ mode: complete
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src_a
+ Group By Operator
+ keys: KEY._col0 (type: string), KEY._col1 (type: string)
+ mode: complete
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src_b
+
+ Stage: Stage-3
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src_a
+
+ Stage: Stage-4
+ Stats-Aggr Operator
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src_b
+
+ Stage: Stage-5
+ Stats-Aggr Operator
+
+PREHOOK: query: from (select key, value from src group by key, value) s
+insert overwrite table src_a select s.key, s.value group by s.key, s.value
+insert overwrite table src_b select s.key, s.value group by s.key, s.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_a
+PREHOOK: Output: default@src_b
+POSTHOOK: query: from (select key, value from src group by key, value) s
+insert overwrite table src_a select s.key, s.value group by s.key, s.value
+insert overwrite table src_b select s.key, s.value group by s.key, s.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_a
+POSTHOOK: Output: default@src_b
+POSTHOOK: Lineage: src_a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_a
+#### A masked pattern was here ####
+0 val_0
+10 val_10
+100 val_100
+103 val_103
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+119 val_119
+12 val_12
+120 val_120
+125 val_125
+126 val_126
+128 val_128
+129 val_129
+131 val_131
+133 val_133
+134 val_134
+136 val_136
+137 val_137
+138 val_138
+143 val_143
+145 val_145
+146 val_146
+149 val_149
+15 val_15
+150 val_150
+152 val_152
+153 val_153
+155 val_155
+156 val_156
+157 val_157
+158 val_158
+160 val_160
+162 val_162
+163 val_163
+164 val_164
+165 val_165
+166 val_166
+167 val_167
+168 val_168
+169 val_169
+17 val_17
+170 val_170
+172 val_172
+174 val_174
+175 val_175
+176 val_176
+177 val_177
+178 val_178
+179 val_179
+18 val_18
+180 val_180
+181 val_181
+183 val_183
+186 val_186
+187 val_187
+189 val_189
+19 val_19
+190 val_190
+191 val_191
+192 val_192
+193 val_193
+194 val_194
+195 val_195
+196 val_196
+197 val_197
+199 val_199
+2 val_2
+20 val_20
+200 val_200
+201 val_201
+202 val_202
+203 val_203
+205 val_205
+207 val_207
+208 val_208
+209 val_209
+213 val_213
+214 val_214
+216 val_216
+217 val_217
+218 val_218
+219 val_219
+221 val_221
+222 val_222
+223 val_223
+224 val_224
+226 val_226
+228 val_228
+229 val_229
+230 val_230
+233 val_233
+235 val_235
+237 val_237
+238 val_238
+239 val_239
+24 val_24
+241 val_241
+242 val_242
+244 val_244
+247 val_247
+248 val_248
+249 val_249
+252 val_252
+255 val_255
+256 val_256
+257 val_257
+258 val_258
+26 val_26
+260 val_260
+262 val_262
+263 val_263
+265 val_265
+266 val_266
+27 val_27
+272 val_272
+273 val_273
+274 val_274
+275 val_275
+277 val_277
+278 val_278
+28 val_28
+280 val_280
+281 val_281
+282 val_282
+283 val_283
+284 val_284
+285 val_285
+286 val_286
+287 val_287
+288 val_288
+289 val_289
+291 val_291
+292 val_292
+296 val_296
+298 val_298
+30 val_30
+302 val_302
+305 val_305
+306 val_306
+307 val_307
+308 val_308
+309 val_309
+310 val_310
+311 val_311
+315 val_315
+316 val_316
+317 val_317
+318 val_318
+321 val_321
+322 val_322
+323 val_323
+325 val_325
+327 val_327
+33 val_33
+331 val_331
+332 val_332
+333 val_333
+335 val_335
+336 val_336
+338 val_338
+339 val_339
+34 val_34
+341 val_341
+342 val_342
+344 val_344
+345 val_345
+348 val_348
+35 val_35
+351 val_351
+353 val_353
+356 val_356
+360 val_360
+362 val_362
+364 val_364
+365 val_365
+366 val_366
+367 val_367
+368 val_368
+369 val_369
+37 val_37
+373 val_373
+374 val_374
+375 val_375
+377 val_377
+378 val_378
+379 val_379
+382 val_382
+384 val_384
+386 val_386
+389 val_389
+392 val_392
+393 val_393
+394 val_394
+395 val_395
+396 val_396
+397 val_397
+399 val_399
+4 val_4
+400 val_400
+401 val_401
+402 val_402
+403 val_403
+404 val_404
+406 val_406
+407 val_407
+409 val_409
+41 val_41
+411 val_411
+413 val_413
+414 val_414
+417 val_417
+418 val_418
+419 val_419
+42 val_42
+421 val_421
+424 val_424
+427 val_427
+429 val_429
+43 val_43
+430 val_430
+431 val_431
+432 val_432
+435 val_435
+436 val_436
+437 val_437
+438 val_438
+439 val_439
+44 val_44
+443 val_443
+444 val_444
+446 val_446
+448 val_448
+449 val_449
+452 val_452
+453 val_453
+454 val_454
+455 val_455
+457 val_457
+458 val_458
+459 val_459
+460 val_460
+462 val_462
+463 val_463
+466 val_466
+467 val_467
+468 val_468
+469 val_469
+47 val_47
+470 val_470
+472 val_472
+475 val_475
+477 val_477
+478 val_478
+479 val_479
+480 val_480
+481 val_481
+482 val_482
+483 val_483
+484 val_484
+485 val_485
+487 val_487
+489 val_489
+490 val_490
+491 val_491
+492 val_492
+493 val_493
+494 val_494
+495 val_495
+496 val_496
+497 val_497
+498 val_498
+5 val_5
+51 val_51
+53 val_53
+54 val_54
+57 val_57
+58 val_58
+64 val_64
+65 val_65
+66 val_66
+67 val_67
+69 val_69
+70 val_70
+72 val_72
+74 val_74
+76 val_76
+77 val_77
+78 val_78
+8 val_8
+80 val_80
+82 val_82
+83 val_83
+84 val_84
+85 val_85
+86 val_86
+87 val_87
+9 val_9
+90 val_90
+92 val_92
+95 val_95
+96 val_96
+97 val_97
+98 val_98
+PREHOOK: query: select * from src_b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_b
+#### A masked pattern was here ####
+0 val_0
+10 val_10
+100 val_100
+103 val_103
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+119 val_119
+12 val_12
+120 val_120
+125 val_125
+126 val_126
+128 val_128
+129 val_129
+131 val_131
+133 val_133
+134 val_134
+136 val_136
+137 val_137
+138 val_138
+143 val_143
+145 val_145
+146 val_146
+149 val_149
+15 val_15
+150 val_150
+152 val_152
+153 val_153
+155 val_155
+156 val_156
+157 val_157
+158 val_158
+160 val_160
+162 val_162
+163 val_163
+164 val_164
+165 val_165
+166 val_166
+167 val_167
+168 val_168
+169 val_169
+17 val_17
+170 val_170
+172 val_172
+174 val_174
+175 val_175
+176 val_176
+177 val_177
+178 val_178
+179 val_179
+18 val_18
+180 val_180
+181 val_181
+183 val_183
+186 val_186
+187 val_187
+189 val_189
+19 val_19
+190 val_190
+191 val_191
+192 val_192
+193 val_193
+194 val_194
+195 val_195
+196 val_196
+197 val_197
+199 val_199
+2 val_2
+20 val_20
+200 val_200
+201 val_201
+202 val_202
+203 val_203
+205 val_205
+207 val_207
+208 val_208
+209 val_209
+213 val_213
+214 val_214
+216 val_216
+217 val_217
+218 val_218
+219 val_219
+221 val_221
+222 val_222
+223 val_223
+224 val_224
+226 val_226
+228 val_228
+229 val_229
+230 val_230
+233 val_233
+235 val_235
+237 val_237
+238 val_238
+239 val_239
+24 val_24
+241 val_241
+242 val_242
+244 val_244
+247 val_247
+248 val_248
+249 val_249
+252 val_252
+255 val_255
+256 val_256
+257 val_257
+258 val_258
+26 val_26
+260 val_260
+262 val_262
+263 val_263
+265 val_265
+266 val_266
+27 val_27
+272 val_272
+273 val_273
+274 val_274
+275 val_275
+277 val_277
+278 val_278
+28 val_28
+280 val_280
+281 val_281
+282 val_282
+283 val_283
+284 val_284
+285 val_285
+286 val_286
+287 val_287
+288 val_288
+289 val_289
+291 val_291
+292 val_292
+296 val_296
+298 val_298
+30 val_30
+302 val_302
+305 val_305
+306 val_306
+307 val_307
+308 val_308
+309 val_309
+310 val_310
+311 val_311
+315 val_315
+316 val_316
+317 val_317
+318 val_318
+321 val_321
+322 val_322
+323 val_323
+325 val_325
+327 val_327
+33 val_33
+331 val_331
+332 val_332
+333 val_333
+335 val_335
+336 val_336
+338 val_338
+339 val_339
+34 val_34
+341 val_341
+342 val_342
+344 val_344
+345 val_345
+348 val_348
+35 val_35
+351 val_351
+353 val_353
+356 val_356
+360 val_360
+362 val_362
+364 val_364
+365 val_365
+366 val_366
+367 val_367
+368 val_368
+369 val_369
+37 val_37
+373 val_373
+374 val_374
+375 val_375
+377 val_377
+378 val_378
+379 val_379
+382 val_382
+384 val_384
+386 val_386
+389 val_389
+392 val_392
+393 val_393
+394 val_394
+395 val_395
+396 val_396
+397 val_397
+399 val_399
+4 val_4
+400 val_400
+401 val_401
+402 val_402
+403 val_403
+404 val_404
+406 val_406
+407 val_407
+409 val_409
+41 val_41
+411 val_411
+413 val_413
+414 val_414
+417 val_417
+418 val_418
+419 val_419
+42 val_42
+421 val_421
+424 val_424
+427 val_427
+429 val_429
+43 val_43
+430 val_430
+431 val_431
+432 val_432
+435 val_435
+436 val_436
+437 val_437
+438 val_438
+439 val_439
+44 val_44
+443 val_443
+444 val_444
+446 val_446
+448 val_448
+449 val_449
+452 val_452
+453 val_453
+454 val_454
+455 val_455
+457 val_457
+458 val_458
+459 val_459
+460 val_460
+462 val_462
+463 val_463
+466 val_466
+467 val_467
+468 val_468
+469 val_469
+47 val_47
+470 val_470
+472 val_472
+475 val_475
+477 val_477
+478 val_478
+479 val_479
+480 val_480
+481 val_481
+482 val_482
+483 val_483
+484 val_484
+485 val_485
+487 val_487
+489 val_489
+490 val_490
+491 val_491
+492 val_492
+493 val_493
+494 val_494
+495 val_495
+496 val_496
+497 val_497
+498 val_498
+5 val_5
+51 val_51
+53 val_53
+54 val_54
+57 val_57
+58 val_58
+64 val_64
+65 val_65
+66 val_66
+67 val_67
+69 val_69
+70 val_70
+72 val_72
+74 val_74
+76 val_76
+77 val_77
+78 val_78
+8 val_8
+80 val_80
+82 val_82
+83 val_83
+84 val_84
+85 val_85
+86 val_86
+87 val_87
+9 val_9
+90 val_90
+92 val_92
+95 val_95
+96 val_96
+97 val_97
+98 val_98
+PREHOOK: query: from (select key, value from src group by key, value) s
+insert overwrite table src_a select s.key, s.value group by s.key, s.value
+insert overwrite table src_b select s.key, s.value group by s.key, s.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_a
+PREHOOK: Output: default@src_b
+POSTHOOK: query: from (select key, value from src group by key, value) s
+insert overwrite table src_a select s.key, s.value group by s.key, s.value
+insert overwrite table src_b select s.key, s.value group by s.key, s.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_a
+POSTHOOK: Output: default@src_b
+POSTHOOK: Lineage: src_a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_a
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_a
+#### A masked pattern was here ####
+0 val_0
+10 val_10
+100 val_100
+103 val_103
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+119 val_119
+12 val_12
+120 val_120
+125 val_125
+126 val_126
+128 val_128
+129 val_129
+131 val_131
+133 val_133
+134 val_134
+136 val_136
+137 val_137
+138 val_138
+143 val_143
+145 val_145
+146 val_146
+149 val_149
+15 val_15
+150 val_150
+152 val_152
+153 val_153
+155 val_155
+156 val_156
+157 val_157
+158 val_158
+160 val_160
+162 val_162
+163 val_163
+164 val_164
+165 val_165
+166 val_166
+167 val_167
+168 val_168
+169 val_169
+17 val_17
+170 val_170
+172 val_172
+174 val_174
+175 val_175
+176 val_176
+177 val_177
+178 val_178
+179 val_179
+18 val_18
+180 val_180
+181 val_181
+183 val_183
+186 val_186
+187 val_187
+189 val_189
+19 val_19
+190 val_190
+191 val_191
+192 val_192
+193 val_193
+194 val_194
+195 val_195
+196 val_196
+197 val_197
+199 val_199
+2 val_2
+20 val_20
+200 val_200
+201 val_201
+202 val_202
+203 val_203
+205 val_205
+207 val_207
+208 val_208
+209 val_209
+213 val_213
+214 val_214
+216 val_216
+217 val_217
+218 val_218
+219 val_219
+221 val_221
+222 val_222
+223 val_223
+224 val_224
+226 val_226
+228 val_228
+229 val_229
+230 val_230
+233 val_233
+235 val_235
+237 val_237
+238 val_238
+239 val_239
+24 val_24
+241 val_241
+242 val_242
+244 val_244
+247 val_247
+248 val_248
+249 val_249
+252 val_252
+255 val_255
+256 val_256
+257 val_257
+258 val_258
+26 val_26
+260 val_260
+262 val_262
+263 val_263
+265 val_265
+266 val_266
+27 val_27
+272 val_272
+273 val_273
+274 val_274
+275 val_275
+277 val_277
+278 val_278
+28 val_28
+280 val_280
+281 val_281
+282 val_282
+283 val_283
+284 val_284
+285 val_285
+286 val_286
+287 val_287
+288 val_288
+289 val_289
+291 val_291
+292 val_292
+296 val_296
+298 val_298
+30 val_30
+302 val_302
+305 val_305
+306 val_306
+307 val_307
+308 val_308
+309 val_309
+310 val_310
+311 val_311
+315 val_315
+316 val_316
+317 val_317
+318 val_318
+321 val_321
+322 val_322
+323 val_323
+325 val_325
+327 val_327
+33 val_33
+331 val_331
+332 val_332
+333 val_333
+335 val_335
+336 val_336
+338 val_338
+339 val_339
+34 val_34
+341 val_341
+342 val_342
+344 val_344
+345 val_345
+348 val_348
+35 val_35
+351 val_351
+353 val_353
+356 val_356
+360 val_360
+362 val_362
+364 val_364
+365 val_365
+366 val_366
+367 val_367
+368 val_368
+369 val_369
+37 val_37
+373 val_373
+374 val_374
+375 val_375
+377 val_377
+378 val_378
+379 val_379
+382 val_382
+384 val_384
+386 val_386
+389 val_389
+392 val_392
+393 val_393
+394 val_394
+395 val_395
+396 val_396
+397 val_397
+399 val_399
+4 val_4
+400 val_400
+401 val_401
+402 val_402
+403 val_403
+404 val_404
+406 val_406
+407 val_407
+409 val_409
+41 val_41
+411 val_411
+413 val_413
+414 val_414
+417 val_417
+418 val_418
+419 val_419
+42 val_42
+421 val_421
+424 val_424
+427 val_427
+429 val_429
+43 val_43
+430 val_430
+431 val_431
+432 val_432
+435 val_435
+436 val_436
+437 val_437
+438 val_438
+439 val_439
+44 val_44
+443 val_443
+444 val_444
+446 val_446
+448 val_448
+449 val_449
+452 val_452
+453 val_453
+454 val_454
+455 val_455
+457 val_457
+458 val_458
+459 val_459
+460 val_460
+462 val_462
+463 val_463
+466 val_466
+467 val_467
+468 val_468
+469 val_469
+47 val_47
+470 val_470
+472 val_472
+475 val_475
+477 val_477
+478 val_478
+479 val_479
+480 val_480
+481 val_481
+482 val_482
+483 val_483
+484 val_484
+485 val_485
+487 val_487
+489 val_489
+490 val_490
+491 val_491
+492 val_492
+493 val_493
+494 val_494
+495 val_495
+496 val_496
+497 val_497
+498 val_498
+5 val_5
+51 val_51
+53 val_53
+54 val_54
+57 val_57
+58 val_58
+64 val_64
+65 val_65
+66 val_66
+67 val_67
+69 val_69
+70 val_70
+72 val_72
+74 val_74
+76 val_76
+77 val_77
+78 val_78
+8 val_8
+80 val_80
+82 val_82
+83 val_83
+84 val_84
+85 val_85
+86 val_86
+87 val_87
+9 val_9
+90 val_90
+92 val_92
+95 val_95
+96 val_96
+97 val_97
+98 val_98
+PREHOOK: query: select * from src_b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_b
+#### A masked pattern was here ####
+0 val_0
+10 val_10
+100 val_100
+103 val_103
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+119 val_119
+12 val_12
+120 val_120
+125 val_125
+126 val_126
+128 val_128
+129 val_129
+131 val_131
+133 val_133
+134 val_134
+136 val_136
+137 val_137
+138 val_138
+143 val_143
+145 val_145
+146 val_146
+149 val_149
+15 val_15
+150 val_150
+152 val_152
+153 val_153
+155 val_155
+156 val_156
+157 val_157
+158 val_158
+160 val_160
+162 val_162
+163 val_163
+164 val_164
+165 val_165
+166 val_166
+167 val_167
+168 val_168
+169 val_169
+17 val_17
+170 val_170
+172 val_172
+174 val_174
+175 val_175
+176 val_176
+177 val_177
+178 val_178
+179 val_179
+18 val_18
+180 val_180
+181 val_181
+183 val_183
+186 val_186
+187 val_187
+189 val_189
+19 val_19
+190 val_190
+191 val_191
+192 val_192
+193 val_193
+194 val_194
+195 val_195
+196 val_196
+197 val_197
+199 val_199
+2 val_2
+20 val_20
+200 val_200
+201 val_201
+202 val_202
+203 val_203
+205 val_205
+207 val_207
+208 val_208
+209 val_209
+213 val_213
+214 val_214
+216 val_216
+217 val_217
+218 val_218
+219 val_219
+221 val_221
+222 val_222
+223 val_223
+224 val_224
+226 val_226
+228 val_228
+229 val_229
+230 val_230
+233 val_233
+235 val_235
+237 val_237
+238 val_238
+239 val_239
+24 val_24
+241 val_241
+242 val_242
+244 val_244
+247 val_247
+248 val_248
+249 val_249
+252 val_252
+255 val_255
+256 val_256
+257 val_257
+258 val_258
+26 val_26
+260 val_260
+262 val_262
+263 val_263
+265 val_265
+266 val_266
+27 val_27
+272 val_272
+273 val_273
+274 val_274
+275 val_275
+277 val_277
+278 val_278
+28 val_28
+280 val_280
+281 val_281
+282 val_282
+283 val_283
+284 val_284
+285 val_285
+286 val_286
+287 val_287
+288 val_288
+289 val_289
+291 val_291
+292 val_292
+296 val_296
+298 val_298
+30 val_30
+302 val_302
+305 val_305
+306 val_306
+307 val_307
+308 val_308
+309 val_309
+310 val_310
+311 val_311
+315 val_315
+316 val_316
+317 val_317
+318 val_318
+321 val_321
+322 val_322
+323 val_323
+325 val_325
+327 val_327
+33 val_33
+331 val_331
+332 val_332
+333 val_333
+335 val_335
+336 val_336
+338 val_338
+339 val_339
+34 val_34
+341 val_341
+342 val_342
+344 val_344
+345 val_345
+348 val_348
+35 val_35
+351 val_351
+353 val_353
+356 val_356
+360 val_360
+362 val_362
+364 val_364
+365 val_365
+366 val_366
+367 val_367
+368 val_368
+369 val_369
+37 val_37
+373 val_373
+374 val_374
+375 val_375
+377 val_377
+378 val_378
+379 val_379
+382 val_382
+384 val_384
+386 val_386
+389 val_389
+392 val_392
+393 val_393
+394 val_394
+395 val_395
+396 val_396
+397 val_397
+399 val_399
+4 val_4
+400 val_400
+401 val_401
+402 val_402
+403 val_403
+404 val_404
+406 val_406
+407 val_407
+409 val_409
+41 val_41
+411 val_411
+413 val_413
+414 val_414
+417 val_417
+418 val_418
+419 val_419
+42 val_42
+421 val_421
+424 val_424
+427 val_427
+429 val_429
+43 val_43
+430 val_430
+431 val_431
+432 val_432
+435 val_435
+436 val_436
+437 val_437
+438 val_438
+439 val_439
+44 val_44
+443 val_443
+444 val_444
+446 val_446
+448 val_448
+449 val_449
+452 val_452
+453 val_453
+454 val_454
+455 val_455
+457 val_457
+458 val_458
+459 val_459
+460 val_460
+462 val_462
+463 val_463
+466 val_466
+467 val_467
+468 val_468
+469 val_469
+47 val_47
+470 val_470
+472 val_472
+475 val_475
+477 val_477
+478 val_478
+479 val_479
+480 val_480
+481 val_481
+482 val_482
+483 val_483
+484 val_484
+485 val_485
+487 val_487
+489 val_489
+490 val_490
+491 val_491
+492 val_492
+493 val_493
+494 val_494
+495 val_495
+496 val_496
+497 val_497
+498 val_498
+5 val_5
+51 val_51
+53 val_53
+54 val_54
+57 val_57
+58 val_58
+64 val_64
+65 val_65
+66 val_66
+67 val_67
+69 val_69
+70 val_70
+72 val_72
+74 val_74
+76 val_76
+77 val_77
+78 val_78
+8 val_8
+80 val_80
+82 val_82
+83 val_83
+84 val_84
+85 val_85
+86 val_86
+87 val_87
+9 val_9
+90 val_90
+92 val_92
+95 val_95
+96 val_96
+97 val_97
+98 val_98
[28/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out
new file mode 100644
index 0000000..4f291dd
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out
@@ -0,0 +1,2626 @@
+PREHOOK: query: create table over1k(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ row format delimited
+ fields terminated by '|'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k
+POSTHOOK: query: create table over1k(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ row format delimited
+ fields terminated by '|'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k
+PREHOOK: query: load data local inpath '../../data/files/over1k' into table over1k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@over1k
+POSTHOOK: query: load data local inpath '../../data/files/over1k' into table over1k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@over1k
+PREHOOK: query: create table over1k_orc like over1k
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_orc
+POSTHOOK: query: create table over1k_orc like over1k
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_orc
+PREHOOK: query: alter table over1k_orc set fileformat orc
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@over1k_orc
+PREHOOK: Output: default@over1k_orc
+POSTHOOK: query: alter table over1k_orc set fileformat orc
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@over1k_orc
+POSTHOOK: Output: default@over1k_orc
+PREHOOK: query: insert overwrite table over1k_orc select * from over1k
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k_orc
+POSTHOOK: query: insert overwrite table over1k_orc select * from over1k
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k_orc
+POSTHOOK: Lineage: over1k_orc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_orc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: over1k_orc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: over1k_orc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: over1k_orc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: over1k_orc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_orc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_orc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: over1k_orc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_orc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: over1k_orc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+PREHOOK: query: create table over1k_part_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_orc
+POSTHOOK: query: create table over1k_part_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_part_orc
+PREHOOK: query: create table over1k_part_limit_orc like over1k_part_orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_limit_orc
+POSTHOOK: query: create table over1k_part_limit_orc like over1k_part_orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_part_limit_orc
+PREHOOK: query: alter table over1k_part_limit_orc set fileformat orc
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@over1k_part_limit_orc
+PREHOOK: Output: default@over1k_part_limit_orc
+POSTHOOK: query: alter table over1k_part_limit_orc set fileformat orc
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@over1k_part_limit_orc
+POSTHOOK: Output: default@over1k_part_limit_orc
+PREHOOK: query: create table over1k_part_buck_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si) into 4 buckets stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_buck_orc
+POSTHOOK: query: create table over1k_part_buck_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si) into 4 buckets stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_part_buck_orc
+PREHOOK: query: create table over1k_part_buck_sort_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si)
+ sorted by (f) into 4 buckets stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_buck_sort_orc
+POSTHOOK: query: create table over1k_part_buck_sort_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si)
+ sorted by (f) into 4 buckets stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_part_buck_sort_orc
+PREHOOK: query: -- map-only jobs converted to map-reduce job by hive.optimize.sort.dynamic.partition optimization
+explain insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-only jobs converted to map-reduce job by hive.optimize.sort.dynamic.partition optimization
+explain insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: smallint)
+ sort order: +
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: smallint), VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: float), VALUE._col3 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), _col0 (type: smallint)
+ sort order: ++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint)
+ sort order: +
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_limit_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_limit_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), '_bucket_number' (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint), VALUE._bucket_number (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _bucket_number
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_buck_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_buck_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float)
+ sort order: +++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint), VALUE._bucket_number (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _bucket_number
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_buck_sort_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_buck_sort_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_orc
+PREHOOK: Output: default@over1k_part_orc@ds=foo
+POSTHOOK: query: insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_orc
+POSTHOOK: Output: default@over1k_part_orc@ds=foo/t=27
+POSTHOOK: Output: default@over1k_part_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=27).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=27).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=27).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=27).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert overwrite table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_orc
+PREHOOK: Output: default@over1k_part_limit_orc@ds=foo
+POSTHOOK: query: insert overwrite table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_orc
+POSTHOOK: Output: default@over1k_part_limit_orc@ds=foo/t=27
+POSTHOOK: Output: default@over1k_part_limit_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=27).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=27).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=27).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=27).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert overwrite table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_orc
+PREHOOK: Output: default@over1k_part_buck_orc
+POSTHOOK: query: insert overwrite table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_orc
+POSTHOOK: Output: default@over1k_part_buck_orc@t=27
+POSTHOOK: Output: default@over1k_part_buck_orc@t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=27).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=27).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=27).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=27).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert overwrite table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_orc
+PREHOOK: Output: default@over1k_part_buck_sort_orc
+POSTHOOK: query: insert overwrite table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_orc
+POSTHOOK: Output: default@over1k_part_buck_sort_orc@t=27
+POSTHOOK: Output: default@over1k_part_buck_sort_orc@t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=27).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=27).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=27).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=27).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization
+explain insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization
+explain insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: smallint)
+ sort order: +
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: smallint), VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: float), VALUE._col3 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), _col0 (type: smallint)
+ sort order: ++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert into table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint)
+ sort order: +
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_limit_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_limit_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert into table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), '_bucket_number' (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint), VALUE._bucket_number (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _bucket_number
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_buck_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ t
+ replace: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_buck_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert into table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float)
+ sort order: +++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint), VALUE._bucket_number (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _bucket_number
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_buck_sort_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ t
+ replace: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.over1k_part_buck_sort_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_orc
+PREHOOK: Output: default@over1k_part_orc@ds=foo
+POSTHOOK: query: insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_orc
+POSTHOOK: Output: default@over1k_part_orc@ds=foo/t=27
+POSTHOOK: Output: default@over1k_part_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=27).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=27).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=27).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=27).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert into table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_orc
+PREHOOK: Output: default@over1k_part_limit_orc@ds=foo
+POSTHOOK: query: insert into table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_orc
+POSTHOOK: Output: default@over1k_part_limit_orc@ds=foo/t=27
+POSTHOOK: Output: default@over1k_part_limit_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=27).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=27).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=27).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=27).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit_orc PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert into table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_orc
+PREHOOK: Output: default@over1k_part_buck_orc
+POSTHOOK: query: insert into table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_orc
+POSTHOOK: Output: default@over1k_part_buck_orc@t=27
+POSTHOOK: Output: default@over1k_part_buck_orc@t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=27).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=27).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=27).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=27).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert into table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_orc
+PREHOOK: Output: default@over1k_part_buck_sort_orc
+POSTHOOK: query: insert into table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_orc
+POSTHOOK: Output: default@over1k_part_buck_sort_orc@t=27
+POSTHOOK: Output: default@over1k_part_buck_sort_orc@t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=27).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=27).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=27).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=27).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: desc formatted over1k_part_orc partition(ds="foo",t=27)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_orc
+POSTHOOK: query: desc formatted over1k_part_orc partition(ds="foo",t=27)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_orc
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [foo, 27]
+Database: default
+Table: over1k_part_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 2
+ numRows 32
+ rawDataSize 640
+ totalSize 1400
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_orc
+POSTHOOK: query: desc formatted over1k_part_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_orc
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [foo, __HIVE_DEFAULT_PARTITION__]
+Database: default
+Table: over1k_part_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 2
+ numRows 6
+ rawDataSize 120
+ totalSize 1102
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_limit_orc partition(ds="foo",t=27)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_limit_orc
+POSTHOOK: query: desc formatted over1k_part_limit_orc partition(ds="foo",t=27)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_limit_orc
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [foo, 27]
+Database: default
+Table: over1k_part_limit_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 2
+ numRows 14
+ rawDataSize 280
+ totalSize 1216
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_limit_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_limit_orc
+POSTHOOK: query: desc formatted over1k_part_limit_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_limit_orc
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [foo, __HIVE_DEFAULT_PARTITION__]
+Database: default
+Table: over1k_part_limit_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 2
+ numRows 6
+ rawDataSize 120
+ totalSize 1102
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_buck_orc partition(t=27)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_buck_orc
+POSTHOOK: query: desc formatted over1k_part_buck_orc partition(t=27)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_buck_orc
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [27]
+Database: default
+Table: over1k_part_buck_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 8
+ numRows 32
+ rawDataSize 640
+ totalSize 4548
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [si]
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_buck_orc partition(t="__HIVE_DEFAULT_PARTITION__")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_buck_orc
+POSTHOOK: query: desc formatted over1k_part_buck_orc partition(t="__HIVE_DEFAULT_PARTITION__")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_buck_orc
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [__HIVE_DEFAULT_PARTITION__]
+Database: default
+Table: over1k_part_buck_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 8
+ numRows 6
+ rawDataSize 120
+ totalSize 2212
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [si]
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_buck_sort_orc partition(t=27)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_buck_sort_orc
+POSTHOOK: query: desc formatted over1k_part_buck_sort_orc partition(t=27)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_buck_sort_orc
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [27]
+Database: default
+Table: over1k_part_buck_sort_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 8
+ numRows 32
+ rawDataSize 640
+ totalSize 4534
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [si]
+Sort Columns: [Order(col:f, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_buck_sort_orc partition(t="__HIVE_DEFAULT_PARTITION__")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_buck_sort_orc
+POSTHOOK: query: desc formatted over1k_part_buck_sort_orc partition(t="__HIVE_DEFAULT_PARTITION__")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_buck_sort_orc
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [__HIVE_DEFAULT_PARTITION__]
+Database: default
+Table: over1k_part_buck_sort_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 8
+ numRows 6
+ rawDataSize 120
+ totalSize 2212
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [si]
+Sort Columns: [Order(col:f, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select count(*) from over1k_part_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_part_orc
+PREHOOK: Input: default@over1k_part_orc@ds=foo/t=27
+PREHOOK: Input: default@over1k_part_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from over1k_part_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_part_orc
+POSTHOOK: Input: default@over1k_part_orc@ds=foo/t=27
+POSTHOOK: Input: default@over1k_part_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+38
+PREHOOK: query: select count(*) from over1k_part_limit_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_part_limit_orc
+PREHOOK: Input: default@over1k_part_limit_orc@ds=foo/t=27
+PREHOOK: Input: default@over1k_part_limit_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from over1k_part_limit_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_part_limit_orc
+POSTHOOK: Input: default@over1k_part_limit_orc@ds=foo/t=27
+POSTHOOK: Input: default@over1k_part_limit_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+20
+PREHOOK: query: select count(*) from over1k_part_buck_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_part_buck_orc
+PREHOOK: Input: default@over1k_part_buck_orc@t=27
+PREHOOK: Input: default@over1k_part_buck_orc@t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from over1k_part_buck_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_part_buck_orc
+POSTHOOK: Input: default@over1k_part_buck_orc@t=27
+POSTHOOK: Input: default@over1k_part_buck_orc@t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+38
+PREHOOK: query: select count(*) from over1k_part_buck_sort_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_part_buck_sort_orc
+PREHOOK: Input: default@over1k_part_buck_sort_orc@t=27
+PREHOOK: Input: default@over1k_part_buck_sort_orc@t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from over1k_part_buck_sort_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_part_buck_sort_orc
+POSTHOOK: Input: default@over1k_part_buck_sort_orc@t=27
+POSTHOOK: Input: default@over1k_part_buck_sort_orc@t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+38
+PREHOOK: query: -- tests for HIVE-6883
+create table over1k_part2_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part2_orc
+POSTHOOK: query: -- tests for HIVE-6883
+create table over1k_part2_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_part2_orc
+PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: int)
+ sort order: +
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: float), VALUE._col3 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: int)
+ sort order: +
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: float), VALUE._col3 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), _col1 (type: int)
+ sort order: ++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: int)
+ sort order: +
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (_col0 is null or (_col0 = 27)) (type: boolean)
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint)
+ sort order: +
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k_orc
+ Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ sort order: +++++
+ Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator
+explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t
+PREHOOK: type: QUERY
+POSTHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator
+explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ S
<TRUNCATED>
[37/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
new file mode 100644
index 0000000..111aaaa
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
@@ -0,0 +1,686 @@
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab_part
+POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab_part
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab
+POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (CUSTOM_EDGE), Map 3 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key is not null and value is not null) (type: boolean)
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col1 (type: string)
+ 1 value (type: string)
+ outputColumnNames: _col0, _col12
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col12 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: int)
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: CREATE TABLE tab1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab1
+POSTHOOK: query: CREATE TABLE tab1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab1
+PREHOOK: query: insert overwrite table tab1
+select key,value from srcbucket_mapjoin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab1
+POSTHOOK: query: insert overwrite table tab1
+select key,value from srcbucket_mapjoin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab1
+POSTHOOK: Lineage: tab1.key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab1.value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select a.key, a.value, b.value
+from tab1 a join src b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.key, a.value, b.value
+from tab1 a join src b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: UDFToDouble(key) is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: UDFToDouble(key) (type: double)
+ sort order: +
+ Map-reduce partition columns: UDFToDouble(key) (type: double)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: int), value (type: string)
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: UDFToDouble(key) is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 UDFToDouble(key) (type: double)
+ 1 UDFToDouble(key) (type: double)
+ outputColumnNames: _col0, _col1, _col6
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select a.key, b.key from (select key from tab_part where key > 1) a join (select key from tab_part where key > 2) b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.key, b.key from (select key from tab_part where key > 1) a join (select key from tab_part where key > 2) b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (CUSTOM_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: tab_part
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key > 1) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: tab_part
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key > 2) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select a.key, b.key from (select key from tab_part where key > 1) a left outer join (select key from tab_part where key > 2) b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.key, b.key from (select key from tab_part where key > 1) a left outer join (select key from tab_part where key > 2) b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (CUSTOM_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: tab_part
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key > 1) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: tab_part
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key > 2) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select a.key, b.key from (select key from tab_part where key > 1) a right outer join (select key from tab_part where key > 2) b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.key, b.key from (select key from tab_part where key > 1) a right outer join (select key from tab_part where key > 2) b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (CUSTOM_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: tab_part
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key > 1) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: tab_part
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key > 2) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain select a.key, b.key from (select distinct key from tab) a join tab b on b.key = a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key, b.key from (select distinct key from tab) a join tab b on b.key = a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 3 <- Reducer 2 (CUSTOM_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: tab
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: key (type: int)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 0 Reducer 2
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 60 Data size: 636 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 60 Data size: 636 Basic stats: COMPLETE Column stats: NONE
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain select a.value, b.value from (select distinct value from tab) a join tab b on b.key = a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value, b.value from (select distinct value from tab) a join tab b on b.key = a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 3 <- Reducer 2 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: tab
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: UDFToDouble(value) is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: value (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: UDFToDouble(key) is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 UDFToDouble(_col0) (type: double)
+ 1 UDFToDouble(key) (type: double)
+ outputColumnNames: _col0, _col2
+ input vertices:
+ 0 Reducer 2
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: string), _col2 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 60 Data size: 636 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: UDFToDouble(_col0) (type: double)
+ sort order: +
+ Map-reduce partition columns: UDFToDouble(_col0) (type: double)
+ Statistics: Num rows: 60 Data size: 636 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_gby.q.out b/ql/src/test/results/clientpositive/llap/cbo_gby.q.out
new file mode 100644
index 0000000..04597a7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_gby.q.out
@@ -0,0 +1,124 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 6. Test Select + TS + Join + Fil + GB + GB Having
+select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 6. Test Select + TS + Join + Fil + GB + GB Having
+select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+ 1 4 2
+ 1 4 2
+1 4 12
+1 4 2
+NULL NULL NULL
+PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+5.0 12 1
+5.0 2 3
+NULL NULL 1
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 12 6
+1 2 6
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 12 6
+1 2 6
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 12 6
+1 2 6
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 12 6
+1 2 6
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 12 6
+1 2 6
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_gby_empty.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_gby_empty.q.out b/ql/src/test/results/clientpositive/llap/cbo_gby_empty.q.out
new file mode 100644
index 0000000..68f0255
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_gby_empty.q.out
@@ -0,0 +1,77 @@
+PREHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr
+select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr
+select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+tst1
+PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'tst1' as key, count(1) as value from src) unionsrc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+tst1 500
+PREHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
+ UNION ALL
+ select 'min' as key, min(c_int) as value from cbo_t3 s2
+ UNION ALL
+ select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
+ UNION ALL
+ select 'min' as key, min(c_int) as value from cbo_t3 s2
+ UNION ALL
+ select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+avg
+max
+min
+PREHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
+ UNION ALL
+ select 'min' as key, min(c_int) as value from cbo_t3 s2
+ UNION ALL
+ select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select unionsrc.key, unionsrc.value FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
+ UNION ALL
+ select 'min' as key, min(c_int) as value from cbo_t3 s2
+ UNION ALL
+ select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc order by unionsrc.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+avg 1.5
+max 3.0
+min 1.0
+PREHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
+ UNION ALL
+ select 'min' as key, min(c_int) as value from cbo_t3 s2
+ UNION ALL
+ select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc group by unionsrc.key order by unionsrc.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1
+ UNION ALL
+ select 'min' as key, min(c_int) as value from cbo_t3 s2
+ UNION ALL
+ select 'avg' as key, avg(c_int) as value from cbo_t3 s3) unionsrc group by unionsrc.key order by unionsrc.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+avg 1
+max 1
+min 1
[47/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_join_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_join_filters.q.out b/ql/src/test/results/clientpositive/llap/auto_join_filters.q.out
new file mode 100644
index 0000000..8fde41d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_join_filters.q.out
@@ -0,0 +1,540 @@
+PREHOOK: query: CREATE TABLE myinput1(key int, value int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@myinput1
+POSTHOOK: query: CREATE TABLE myinput1(key int, value int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@myinput1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@myinput1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@myinput1
+Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Map 1' is a cross product
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+Warning: Map Join MAPJOIN[15][bigTable=a] in task 'Map 1' is a cross product
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+4937935
+Warning: Map Join MAPJOIN[15][bigTable=b] in task 'Map 2' is a cross product
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3080335
+Warning: Shuffle Join MERGEJOIN[14][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+4939870
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+4937935
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+4937935
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+4937935
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+4937935
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+4939870
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+4939870
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+4939870
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+4939870
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3080335
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_input1
+POSTHOOK: query: CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_input1
+PREHOOK: query: CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_input2
+POSTHOOK: query: CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_input2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_input1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_input1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_input1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_input1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input2
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_input2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input2
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_input2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input2
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_input2
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input2
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_input2
+Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Map 1' is a cross product
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Map 1' is a cross product
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Map 2' is a cross product
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key = b.key and a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key=b.key and a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key = b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.value = b.value and a.key=b.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.value=c.value AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.value = c.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a LEFT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) RIGHT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) from myinput1 a RIGHT OUTER JOIN myinput1 b ON (a.value=b.value AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value) LEFT OUTER JOIN myinput1 c ON (b.key=c.key AND c.key > 40 AND c.value > 50 AND c.key = c.value AND b.key > 40 AND b.value > 50 AND b.key = b.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
+PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b RIGHT OUTER JOIN myinput1 c ON a.value = b.value and b.key = c.key AND a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value AND c.key > 40 AND c.value > 50 AND c.key = c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+3078400
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out
new file mode 100644
index 0000000..9a3b7fe
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out
@@ -0,0 +1,1034 @@
+PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket
+
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket
+
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [a]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 1 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 0 Map 1
+ Position of Big Table: 1
+ Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [b]
+ /bucket_big/ds=2008-04-09 [b]
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+38
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ /bucket_big/ds=2008-04-09 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+38
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ /bucket_big/ds=2008-04-09 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+38
[36/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cbo_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_join.q.out b/ql/src/test/results/clientpositive/llap/cbo_join.q.out
new file mode 100644
index 0000000..c5e9858
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_join.q.out
@@ -0,0 +1,15028 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+-- 4. Test Select + Join + TS
+select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+-- 4. Test Select + Join + TS
+select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+PREHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+PREHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+NULL NULL
+NULL NULL
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+NULL 2
+NULL 2
+NULL 2
+NULL 2
+NULL 2
+NULL NULL
+NULL NULL
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+NULL 2
+NULL 2
+NULL 2
+NULL 2
+NULL 2
+NULL NULL
+NULL NULL
+NULL NULL
+NULL NULL
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+PREHOOK: query: select a, cbo_t1.b, key, cbo_t2.c_int, cbo_t3.p from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=key join (select key as p, c_int as q, cbo_t3.c_float as r from cbo_t3)cbo_t3 on cbo_t1.a=cbo_t3.p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a, cbo_t1.b, key, cbo_t2.c_int, cbo_t3.p from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=key join (select key as p, c_int as q, cbo_t3.c_float as r from cbo_t3)cbo_t3 on cbo_t1.a=cbo_t3.p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+ 1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+1 1 1 1 1
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.c_int, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.c_int, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+1 1.0 1 1
+PREHOOK: query: select cbo_t3.c_int, b, cbo_t2.c_int, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, b, cbo_t2.c_int, cbo_t1.c from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join cbo_t2 on cbo_t1.a=cbo_t2.key join cbo_t3 on cbo_t1.a=cbo_t3.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+1 1 1 1.0
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 left outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p left outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p left outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+1 1.0 1 1 1
+PREHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p full outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p full outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+1 1 1 1
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+PREHOOK: query: -- 5. Test Select + Join + FIL + TS
+select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5. Test Select + Join + FIL + TS
+select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 right outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+#### A masked pattern was here ####
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+1 1
+PREHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or cbo_t2.q >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, cbo_t1.c, cbo_t2.p, q, cbo_
<TRUNCATED>
[29/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
new file mode 100644
index 0000000..22e0f29
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
@@ -0,0 +1,1114 @@
+PREHOOK: query: create table dim_shops (id int, label string) row format delimited fields terminated by ',' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dim_shops
+POSTHOOK: query: create table dim_shops (id int, label string) row format delimited fields terminated by ',' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dim_shops
+PREHOOK: query: load data local inpath '../../data/files/dim_shops.txt' into table dim_shops
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@dim_shops
+POSTHOOK: query: load data local inpath '../../data/files/dim_shops.txt' into table dim_shops
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@dim_shops
+PREHOOK: query: create table agg_01 (amount decimal) partitioned by (dim_shops_id int) row format delimited fields terminated by ',' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@agg_01
+POSTHOOK: query: create table agg_01 (amount decimal) partitioned by (dim_shops_id int) row format delimited fields terminated by ',' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@agg_01
+PREHOOK: query: alter table agg_01 add partition (dim_shops_id = 1)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@agg_01
+POSTHOOK: query: alter table agg_01 add partition (dim_shops_id = 1)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@agg_01
+POSTHOOK: Output: default@agg_01@dim_shops_id=1
+PREHOOK: query: alter table agg_01 add partition (dim_shops_id = 2)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@agg_01
+POSTHOOK: query: alter table agg_01 add partition (dim_shops_id = 2)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@agg_01
+POSTHOOK: Output: default@agg_01@dim_shops_id=2
+PREHOOK: query: alter table agg_01 add partition (dim_shops_id = 3)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@agg_01
+POSTHOOK: query: alter table agg_01 add partition (dim_shops_id = 3)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@agg_01
+POSTHOOK: Output: default@agg_01@dim_shops_id=3
+PREHOOK: query: load data local inpath '../../data/files/agg_01-p1.txt' into table agg_01 partition (dim_shops_id=1)
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@agg_01@dim_shops_id=1
+POSTHOOK: query: load data local inpath '../../data/files/agg_01-p1.txt' into table agg_01 partition (dim_shops_id=1)
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@agg_01@dim_shops_id=1
+PREHOOK: query: load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01 partition (dim_shops_id=2)
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@agg_01@dim_shops_id=2
+POSTHOOK: query: load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01 partition (dim_shops_id=2)
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@agg_01@dim_shops_id=2
+PREHOOK: query: load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01 partition (dim_shops_id=3)
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@agg_01@dim_shops_id=3
+POSTHOOK: query: load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01 partition (dim_shops_id=3)
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@agg_01@dim_shops_id=3
+PREHOOK: query: analyze table dim_shops compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dim_shops
+PREHOOK: Output: default@dim_shops
+POSTHOOK: query: analyze table dim_shops compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dim_shops
+POSTHOOK: Output: default@dim_shops
+PREHOOK: query: analyze table agg_01 partition (dim_shops_id) compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@agg_01
+PREHOOK: Input: default@agg_01@dim_shops_id=1
+PREHOOK: Input: default@agg_01@dim_shops_id=2
+PREHOOK: Input: default@agg_01@dim_shops_id=3
+PREHOOK: Output: default@agg_01
+PREHOOK: Output: default@agg_01@dim_shops_id=1
+PREHOOK: Output: default@agg_01@dim_shops_id=2
+PREHOOK: Output: default@agg_01@dim_shops_id=3
+POSTHOOK: query: analyze table agg_01 partition (dim_shops_id) compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@agg_01
+POSTHOOK: Input: default@agg_01@dim_shops_id=1
+POSTHOOK: Input: default@agg_01@dim_shops_id=2
+POSTHOOK: Input: default@agg_01@dim_shops_id=3
+POSTHOOK: Output: default@agg_01
+POSTHOOK: Output: default@agg_01@dim_shops_id=1
+POSTHOOK: Output: default@agg_01@dim_shops_id=2
+POSTHOOK: Output: default@agg_01@dim_shops_id=3
+PREHOOK: query: select * from dim_shops
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dim_shops
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+1 foo
+2 bar
+3 baz
+PREHOOK: query: select * from agg_01
+PREHOOK: type: QUERY
+PREHOOK: Input: default@agg_01
+PREHOOK: Input: default@agg_01@dim_shops_id=1
+PREHOOK: Input: default@agg_01@dim_shops_id=2
+PREHOOK: Input: default@agg_01@dim_shops_id=3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from agg_01
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@agg_01
+POSTHOOK: Input: default@agg_01@dim_shops_id=1
+POSTHOOK: Input: default@agg_01@dim_shops_id=2
+POSTHOOK: Input: default@agg_01@dim_shops_id=3
+#### A masked pattern was here ####
+1 1
+2 1
+3 1
+4 2
+5 2
+6 2
+7 3
+8 3
+9 3
+PREHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 4 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: agg
+ filterExpr: dim_shops_id is not null (type: boolean)
+ Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 dim_shops_id (type: int)
+ 1 id (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Filter Operator
+ predicate: ((_col1 = _col5) and (_col6) IN ('foo', 'bar')) (type: boolean)
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col6 (type: string), _col0 (type: decimal(10,0))
+ outputColumnNames: _col6, _col0
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(), sum(_col0)
+ keys: _col6 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ filterExpr: (id is not null and (label) IN ('foo', 'bar')) (type: boolean)
+ Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (id is not null and (label) IN ('foo', 'bar')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: id (type: int)
+ sort order: +
+ Map-reduce partition columns: id (type: int)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ value expressions: label (type: string)
+ Select Operator
+ expressions: id (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: agg
+ Partition key expr: dim_shops_id
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Target column: dim_shops_id
+ Target Vertex: Map 1
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0), sum(VALUE._col1)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0))
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+PREHOOK: type: QUERY
+PREHOOK: Input: default@agg_01
+PREHOOK: Input: default@agg_01@dim_shops_id=1
+PREHOOK: Input: default@agg_01@dim_shops_id=2
+PREHOOK: Input: default@agg_01@dim_shops_id=3
+PREHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@agg_01
+POSTHOOK: Input: default@agg_01@dim_shops_id=1
+POSTHOOK: Input: default@agg_01@dim_shops_id=2
+POSTHOOK: Input: default@agg_01@dim_shops_id=3
+POSTHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+bar 3 15
+foo 3 6
+PREHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 4 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: agg
+ filterExpr: dim_shops_id is not null (type: boolean)
+ Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 dim_shops_id (type: int)
+ 1 id (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Filter Operator
+ predicate: ((_col1 = _col5) and (_col6) IN ('foo', 'bar')) (type: boolean)
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col6 (type: string), _col0 (type: decimal(10,0))
+ outputColumnNames: _col6, _col0
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(), sum(_col0)
+ keys: _col6 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ filterExpr: (id is not null and (label) IN ('foo', 'bar')) (type: boolean)
+ Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (id is not null and (label) IN ('foo', 'bar')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: id (type: int)
+ sort order: +
+ Map-reduce partition columns: id (type: int)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ value expressions: label (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0), sum(VALUE._col1)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0))
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+PREHOOK: type: QUERY
+PREHOOK: Input: default@agg_01
+PREHOOK: Input: default@agg_01@dim_shops_id=1
+PREHOOK: Input: default@agg_01@dim_shops_id=2
+PREHOOK: Input: default@agg_01@dim_shops_id=3
+PREHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@agg_01
+POSTHOOK: Input: default@agg_01@dim_shops_id=1
+POSTHOOK: Input: default@agg_01@dim_shops_id=2
+POSTHOOK: Input: default@agg_01@dim_shops_id=3
+POSTHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+bar 3 15
+foo 3 6
+PREHOOK: query: EXPLAIN SELECT d1.label
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT d1.label
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: agg
+ filterExpr: dim_shops_id is not null (type: boolean)
+ Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 dim_shops_id (type: int)
+ 1 id (type: int)
+ outputColumnNames: _col1, _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Filter Operator
+ predicate: (_col1 = _col5) (type: boolean)
+ Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col6 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ filterExpr: id is not null (type: boolean)
+ Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: id is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: id (type: int)
+ sort order: +
+ Map-reduce partition columns: id (type: int)
+ Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+ value expressions: label (type: string)
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT d1.label
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@agg_01
+PREHOOK: Input: default@agg_01@dim_shops_id=1
+PREHOOK: Input: default@agg_01@dim_shops_id=2
+PREHOOK: Input: default@agg_01@dim_shops_id=3
+PREHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT d1.label
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@agg_01
+POSTHOOK: Input: default@agg_01@dim_shops_id=1
+POSTHOOK: Input: default@agg_01@dim_shops_id=2
+POSTHOOK: Input: default@agg_01@dim_shops_id=3
+POSTHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+foo
+foo
+foo
+bar
+bar
+bar
+baz
+baz
+baz
+PREHOOK: query: EXPLAIN SELECT agg.amount
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and agg.dim_shops_id = 1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT agg.amount
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and agg.dim_shops_id = 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: agg
+ filterExpr: (dim_shops_id = 1) (type: boolean)
+ Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 1 (type: int)
+ 1 1 (type: int)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ filterExpr: (id = 1) (type: boolean)
+ Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (id = 1) (type: boolean)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: 1 (type: int)
+ sort order: +
+ Map-reduce partition columns: 1 (type: int)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT agg.amount
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and agg.dim_shops_id = 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@agg_01
+PREHOOK: Input: default@agg_01@dim_shops_id=1
+PREHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT agg.amount
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and agg.dim_shops_id = 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@agg_01
+POSTHOOK: Input: default@agg_01@dim_shops_id=1
+POSTHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+1
+2
+3
+PREHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 4 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: agg
+ filterExpr: dim_shops_id is not null (type: boolean)
+ Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 dim_shops_id (type: int)
+ 1 id (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Filter Operator
+ predicate: ((_col1 = _col5) and (_col6) IN ('foo', 'bar')) (type: boolean)
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col6 (type: string), _col0 (type: decimal(10,0))
+ outputColumnNames: _col6, _col0
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(), sum(_col0)
+ keys: _col6 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ filterExpr: (id is not null and (label) IN ('foo', 'bar')) (type: boolean)
+ Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (id is not null and (label) IN ('foo', 'bar')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: id (type: int)
+ sort order: +
+ Map-reduce partition columns: id (type: int)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ value expressions: label (type: string)
+ Select Operator
+ expressions: id (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: agg
+ Partition key expr: dim_shops_id
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Target column: dim_shops_id
+ Target Vertex: Map 1
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0), sum(VALUE._col1)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0))
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+PREHOOK: type: QUERY
+PREHOOK: Input: default@agg_01
+PREHOOK: Input: default@agg_01@dim_shops_id=1
+PREHOOK: Input: default@agg_01@dim_shops_id=2
+PREHOOK: Input: default@agg_01@dim_shops_id=3
+PREHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@agg_01
+POSTHOOK: Input: default@agg_01@dim_shops_id=1
+POSTHOOK: Input: default@agg_01@dim_shops_id=2
+POSTHOOK: Input: default@agg_01@dim_shops_id=3
+POSTHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+bar 3 15
+foo 3 6
+PREHOOK: query: EXPLAIN
+SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo'
+UNION ALL
+SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo'
+UNION ALL
+SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE), Union 2 (CONTAINS)
+ Map 4 <- Map 5 (BROADCAST_EDGE), Union 2 (CONTAINS)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: agg_01
+ filterExpr: dim_shops_id is not null (type: boolean)
+ Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 dim_shops_id (type: int)
+ 1 id (type: int)
+ outputColumnNames: _col0, _col1, _col5
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Filter Operator
+ predicate: (_col1 = _col5) (type: boolean)
+ Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: decimal(10,0))
+ outputColumnNames: _col0
+ Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 8 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: dim_shops
+ filterExpr: (id is not null and (label = 'foo')) (type: boolean)
+ Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (id is not null and (label = 'foo')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: id (type: int)
+ sort order: +
+ Map-reduce partition columns: id (type: int)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: id (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: agg_01
+ Partition key expr: dim_shops_id
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Target column: dim_shops_id
+ Target Vertex: Map 1
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: agg_01
+ filterExpr: dim_shops_id is not null (type: boolean)
+ Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 dim_shops_id (type: int)
+ 1 id (type: int)
+ outputColumnNames: _col0, _col1, _col5
+ input vertices:
+ 1 Map 5
+ Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Filter Operator
+ predicate: (_col1 = _col5) (type: boolean)
+ Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: decimal(10,0))
+ outputColumnNames: _col0
+ Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 8 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: dim_shops
+ filterExpr: (id is not null and (label = 'bar')) (type: boolean)
+ Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (id is not null and (label = 'bar')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: id (type: int)
+ sort order: +
+ Map-reduce partition columns: id (type: int)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: id (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: agg_01
+ Partition key expr: dim_shops_id
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Target column: dim_shops_id
+ Target Vertex: Map 4
+ Execution mode: llap
+ Union 2
+ Vertex: Union 2
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo'
+UNION ALL
+SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@agg_01
+PREHOOK: Input: default@agg_01@dim_shops_id=1
+PREHOOK: Input: default@agg_01@dim_shops_id=2
+PREHOOK: Input: default@agg_01@dim_shops_id=3
+PREHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo'
+UNION ALL
+SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@agg_01
+POSTHOOK: Input: default@agg_01@dim_shops_id=1
+POSTHOOK: Input: default@agg_01@dim_shops_id=2
+POSTHOOK: Input: default@agg_01@dim_shops_id=3
+POSTHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+1
+2
+3
+4
+5
+6
+PREHOOK: query: -- Dynamic partition pruning will be removed as data size exceeds the limit;
+-- and for self join on partitioning column, it should not fail (HIVE-10559).
+explain
+select count(*)
+from srcpart s1,
+ srcpart s2
+where s1.ds = s2.ds
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Dynamic partition pruning will be removed as data size exceeds the limit;
+-- and for self join on partitioning column, it should not fail (HIVE-10559).
+explain
+select count(*)
+from srcpart s1,
+ srcpart s2
+where s1.ds = s2.ds
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ filterExpr: ds is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ds (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ filterExpr: ds is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ds (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*)
+from srcpart s1,
+ srcpart s2
+where s1.ds = s2.ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from srcpart s1,
+ srcpart s2
+where s1.ds = s2.ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+2000000
[20/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_into2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_into2.q.out b/ql/src/test/results/clientpositive/llap/insert_into2.q.out
new file mode 100644
index 0000000..6eb8129
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_into2.q.out
@@ -0,0 +1,440 @@
+PREHOOK: query: DROP TABLE insert_into2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE insert_into2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into2 (key int, value string)
+ PARTITIONED BY (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@insert_into2
+POSTHOOK: query: CREATE TABLE insert_into2 (key int, value string)
+ PARTITIONED BY (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@insert_into2
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into2 PARTITION (ds='1')
+ SELECT * FROM src order by key LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into2 PARTITION (ds='1')
+ SELECT * FROM src order by key LIMIT 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 100
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into2
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 1
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src order by key limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into2@ds=1
+POSTHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src order by key limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into2@ds=1
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select count (*) from insert_into2 where ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count (*) from insert_into2 where ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count (*) from insert_into2 where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: select count (*) from insert_into2 where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+100
+PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src order by key limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into2@ds=1
+POSTHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src order by key limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into2@ds=1
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+200
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+PREHOOK: Input: default@insert_into2@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+POSTHOOK: Input: default@insert_into2@ds=1
+#### A masked pattern was here ####
+-39568181484
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+ SELECT * FROM src order by key LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+ SELECT * FROM src order by key LIMIT 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 100
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into2
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 2
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+ SELECT * FROM src order by key LIMIT 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+ SELECT * FROM src order by key LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+PREHOOK: Input: default@insert_into2@ds=1
+PREHOOK: Input: default@insert_into2@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+POSTHOOK: Input: default@insert_into2@ds=1
+POSTHOOK: Input: default@insert_into2@ds=2
+#### A masked pattern was here ####
+-59352272126
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+100
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+ SELECT * FROM src order by key LIMIT 50
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+ SELECT * FROM src order by key LIMIT 50
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 50
+ Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into2
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 2
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+ SELECT * FROM src order by key LIMIT 50
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
+ SELECT * FROM src order by key LIMIT 50
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into2@ds=2
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+PREHOOK: Input: default@insert_into2@ds=1
+PREHOOK: Input: default@insert_into2@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+POSTHOOK: Input: default@insert_into2@ds=1
+POSTHOOK: Input: default@insert_into2@ds=2
+#### A masked pattern was here ####
+-33609711132
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+50
+PREHOOK: query: DROP TABLE insert_into2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@insert_into2
+PREHOOK: Output: default@insert_into2
+POSTHOOK: query: DROP TABLE insert_into2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@insert_into2
+POSTHOOK: Output: default@insert_into2
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_orig_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_orig_table.q.out b/ql/src/test/results/clientpositive/llap/insert_orig_table.q.out
new file mode 100644
index 0000000..5eea74d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_orig_table.q.out
@@ -0,0 +1,80 @@
+PREHOOK: query: create table acid_iot(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_iot
+POSTHOOK: query: create table acid_iot(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_iot
+PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_iot
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@acid_iot
+POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_iot
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@acid_iot
+PREHOOK: query: select count(*) from acid_iot
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_iot
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from acid_iot
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_iot
+#### A masked pattern was here ####
+12288
+PREHOOK: query: insert into table acid_iot select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2,
+ cboolean1, cboolean2 from alltypesorc where cint < 0 order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_iot
+POSTHOOK: query: insert into table acid_iot select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2,
+ cboolean1, cboolean2 from alltypesorc where cint < 0 order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_iot
+POSTHOOK: Lineage: acid_iot.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: acid_iot.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: acid_iot.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: acid_iot.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: acid_iot.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: acid_iot.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_iot.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: acid_iot.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_iot.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_iot.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: acid_iot.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: acid_iot.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: select count(*) from acid_iot
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_iot
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from acid_iot
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_iot
+#### A masked pattern was here ####
+12298
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_update_delete.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_update_delete.q.out b/ql/src/test/results/clientpositive/llap/insert_update_delete.q.out
new file mode 100644
index 0000000..9a3cf4b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_update_delete.q.out
@@ -0,0 +1,78 @@
+PREHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_iud
+POSTHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_iud
+PREHOOK: query: insert into table acid_iud select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_iud
+POSTHOOK: query: insert into table acid_iud select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_iud
+POSTHOOK: Lineage: acid_iud.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_iud.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select a,b from acid_iud order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_iud
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from acid_iud order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_iud
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa
+-1073051226 A34p7oRr2WvUJNf
+-1072910839 0iqrc5
+-1072081801 dPkN74F7
+-1072076362 2uLyD28144vklju213J1mr
+-1071480828 aw724t8c5558x2xneC624
+-1071363017 Anj0oF
+-1070883071 0ruyd6Y50JpdGRf6HqD
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
+PREHOOK: query: update acid_iud set b = 'fred'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_iud
+PREHOOK: Output: default@acid_iud
+POSTHOOK: query: update acid_iud set b = 'fred'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_iud
+POSTHOOK: Output: default@acid_iud
+PREHOOK: query: select a,b from acid_iud order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_iud
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from acid_iud order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_iud
+#### A masked pattern was here ####
+-1073279343 fred
+-1073051226 fred
+-1072910839 fred
+-1072081801 fred
+-1072076362 fred
+-1071480828 fred
+-1071363017 fred
+-1070883071 fred
+-1070551679 fred
+-1069736047 fred
+PREHOOK: query: delete from acid_iud
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_iud
+PREHOOK: Output: default@acid_iud
+POSTHOOK: query: delete from acid_iud
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_iud
+POSTHOOK: Output: default@acid_iud
+PREHOOK: query: select a,b from acid_iud order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_iud
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from acid_iud order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_iud
+#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_values_acid_not_bucketed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_values_acid_not_bucketed.q.out b/ql/src/test/results/clientpositive/llap/insert_values_acid_not_bucketed.q.out
new file mode 100644
index 0000000..4f8ddfa
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_values_acid_not_bucketed.q.out
@@ -0,0 +1,28 @@
+PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_notbucketed
+PREHOOK: query: insert into table acid_notbucketed values (1, 'abc'), (2, 'def')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: insert into table acid_notbucketed values (1, 'abc'), (2, 'def')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@acid_notbucketed
+POSTHOOK: Lineage: acid_notbucketed.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: acid_notbucketed.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: select * from acid_notbucketed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_notbucketed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+1 abc
+2 def
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_values_dynamic_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_values_dynamic_partitioned.q.out b/ql/src/test/results/clientpositive/llap/insert_values_dynamic_partitioned.q.out
new file mode 100644
index 0000000..773feb4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_values_dynamic_partitioned.q.out
@@ -0,0 +1,45 @@
+PREHOOK: query: create table ivdp(i int,
+ de decimal(5,2),
+ vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ivdp
+POSTHOOK: query: create table ivdp(i int,
+ de decimal(5,2),
+ vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ivdp
+PREHOOK: query: insert into table ivdp partition (ds) values
+ (1, 109.23, 'and everywhere that mary went', 'today'),
+ (6553, 923.19, 'the lamb was sure to go', 'tomorrow')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@ivdp
+POSTHOOK: query: insert into table ivdp partition (ds) values
+ (1, 109.23, 'and everywhere that mary went', 'today'),
+ (6553, 923.19, 'the lamb was sure to go', 'tomorrow')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@ivdp@ds=today
+POSTHOOK: Output: default@ivdp@ds=tomorrow
+POSTHOOK: Lineage: ivdp PARTITION(ds=today).de EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: ivdp PARTITION(ds=today).i EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: ivdp PARTITION(ds=today).vc EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: ivdp PARTITION(ds=tomorrow).de EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: ivdp PARTITION(ds=tomorrow).i EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: ivdp PARTITION(ds=tomorrow).vc EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+PREHOOK: query: select * from ivdp order by ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ivdp
+PREHOOK: Input: default@ivdp@ds=today
+PREHOOK: Input: default@ivdp@ds=tomorrow
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ivdp order by ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ivdp
+POSTHOOK: Input: default@ivdp@ds=today
+POSTHOOK: Input: default@ivdp@ds=tomorrow
+#### A masked pattern was here ####
+1 109.23 and everywhere that mary went today
+6553 923.19 the lamb was sure to go tomorrow
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_values_non_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_values_non_partitioned.q.out b/ql/src/test/results/clientpositive/llap/insert_values_non_partitioned.q.out
new file mode 100644
index 0000000..5b1c3cc
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_values_non_partitioned.q.out
@@ -0,0 +1,70 @@
+PREHOOK: query: create table acid_ivnp(ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(5,2),
+ t timestamp,
+ dt date,
+ b boolean,
+ s string,
+ vc varchar(128),
+ ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_ivnp
+POSTHOOK: query: create table acid_ivnp(ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(5,2),
+ t timestamp,
+ dt date,
+ b boolean,
+ s string,
+ vc varchar(128),
+ ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_ivnp
+PREHOOK: query: insert into table acid_ivnp values
+ (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'),
+ (null, null, null, null, null, null, null, null, null, null, null, null, null),
+ (3, 25, 6553, null, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', false, 'its fleece was white as snow', 'a pocket full of posies', 'blue' )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@acid_ivnp
+POSTHOOK: query: insert into table acid_ivnp values
+ (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'),
+ (null, null, null, null, null, null, null, null, null, null, null, null, null),
+ (3, 25, 6553, null, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', false, 'its fleece was white as snow', 'a pocket full of posies', 'blue' )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@acid_ivnp
+POSTHOOK: Lineage: acid_ivnp.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.bi EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.ch EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col13, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.d EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col6, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.de EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col7, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.dt EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col9, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.f EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.i EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.s SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col11, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.si EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.t EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col8, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.ti EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivnp.vc EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col12, type:string, comment:), ]
+PREHOOK: query: select * from acid_ivnp order by ti
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_ivnp
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_ivnp order by ti
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_ivnp
+#### A masked pattern was here ####
+NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+1 257 65537 4294967297 3.14 3.141592654 109.23 2014-08-25 17:21:30 2014-08-25 true mary had a little lamb ring around the rosie red
+3 25 6553 NULL 0.14 1923.141592654 1.23 2014-08-24 17:21:30 2014-08-26 false its fleece was white as snow a pocket full of posies blue
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_values_orig_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_values_orig_table.q.out b/ql/src/test/results/clientpositive/llap/insert_values_orig_table.q.out
new file mode 100644
index 0000000..684cd1b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_values_orig_table.q.out
@@ -0,0 +1,82 @@
+PREHOOK: query: create table acid_ivot(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_ivot
+POSTHOOK: query: create table acid_ivot(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+ cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_ivot
+PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@acid_ivot
+POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@acid_ivot
+PREHOOK: query: select count(*) from acid_ivot
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_ivot
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from acid_ivot
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_ivot
+#### A masked pattern was here ####
+12288
+PREHOOK: query: insert into table acid_ivot values
+ (1, 2, 3, 4, 3.14, 2.34, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true),
+ (111, 222, 3333, 444, 13.14, 10239302.34239320, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@acid_ivot
+POSTHOOK: query: insert into table acid_ivot values
+ (1, 2, 3, 4, 3.14, 2.34, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true),
+ (111, 222, 3333, 444, 13.14, 10239302.34239320, 'fred', 'bob', '2014-09-01 10:34:23.111', '1944-06-06 06:00:00', true, true)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@acid_ivot
+POSTHOOK: Lineage: acid_ivot.cbigint EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.cboolean1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col11, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.cboolean2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col12, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.cdouble EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col6, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.cfloat EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.cint EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.csmallint EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.cstring1 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col7, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.cstring2 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col8, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.ctimestamp1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col9, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.ctimestamp2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivot.ctinyint EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select count(*) from acid_ivot
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_ivot
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from acid_ivot
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_ivot
+#### A masked pattern was here ####
+12290
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_values_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_values_partitioned.q.out b/ql/src/test/results/clientpositive/llap/insert_values_partitioned.q.out
new file mode 100644
index 0000000..6681992
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_values_partitioned.q.out
@@ -0,0 +1,66 @@
+PREHOOK: query: create table acid_ivp(ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(5,2),
+ t timestamp,
+ dt date,
+ s string,
+ vc varchar(128),
+ ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_ivp
+POSTHOOK: query: create table acid_ivp(ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(5,2),
+ t timestamp,
+ dt date,
+ s string,
+ vc varchar(128),
+ ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_ivp
+PREHOOK: query: insert into table acid_ivp partition (ds='today') values
+ (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'),
+ (3, 25, 6553, 429496729, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', 'its fleece was white as snow', 'a pocket full of posies', 'blue')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@acid_ivp@ds=today
+POSTHOOK: query: insert into table acid_ivp partition (ds='today') values
+ (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'),
+ (3, 25, 6553, 429496729, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', 'its fleece was white as snow', 'a pocket full of posies', 'blue')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@acid_ivp@ds=today
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).bi EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).ch EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col12, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).d EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col6, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).de EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col7, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).dt EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col9, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).f EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).i EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).s SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).si EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).t EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col8, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).ti EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivp PARTITION(ds=today).vc EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col11, type:string, comment:), ]
+PREHOOK: query: select * from acid_ivp order by i
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_ivp
+PREHOOK: Input: default@acid_ivp@ds=today
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_ivp order by i
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_ivp
+POSTHOOK: Input: default@acid_ivp@ds=today
+#### A masked pattern was here ####
+3 25 6553 429496729 0.14 1923.141592654 1.23 2014-08-24 17:21:30 2014-08-26 its fleece was white as snow a pocket full of posies blue today
+1 257 65537 4294967297 3.14 3.141592654 109.23 2014-08-25 17:21:30 2014-08-25 mary had a little lamb ring around the rosie red today
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_values_tmp_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_values_tmp_table.q.out b/ql/src/test/results/clientpositive/llap/insert_values_tmp_table.q.out
new file mode 100644
index 0000000..170b4a7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_values_tmp_table.q.out
@@ -0,0 +1,36 @@
+PREHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_ivtt
+POSTHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_ivtt
+PREHOOK: query: insert into table acid_ivtt values
+ (1, 109.23, 'mary had a little lamb'),
+ (429496729, 0.14, 'its fleece was white as snow'),
+ (-29496729, -0.14, 'negative values test')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@acid_ivtt
+POSTHOOK: query: insert into table acid_ivtt values
+ (1, 109.23, 'mary had a little lamb'),
+ (429496729, 0.14, 'its fleece was white as snow'),
+ (-29496729, -0.14, 'negative values test')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@acid_ivtt
+POSTHOOK: Lineage: acid_ivtt.de EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivtt.i EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: acid_ivtt.vc EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+PREHOOK: query: select i, de, vc from acid_ivtt order by i
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_ivtt
+#### A masked pattern was here ####
+POSTHOOK: query: select i, de, vc from acid_ivtt order by i
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_ivtt
+#### A masked pattern was here ####
+-29496729 -0.14 negative values test
+1 109.23 mary had a little lamb
+429496729 0.14 its fleece was white as snow
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/join0.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join0.q.java1.7.out b/ql/src/test/results/clientpositive/llap/join0.q.java1.7.out
new file mode 100644
index 0000000..5651839
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/join0.q.java1.7.out
@@ -0,0 +1,242 @@
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
+-- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1,
+ src2.key as k2, src2.value as v2 FROM
+ (SELECT * FROM src WHERE src.key < 10) src1
+ JOIN
+ (SELECT * FROM src WHERE src.key < 10) src2
+ SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
+-- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1,
+ src2.key as k2, src2.value as v2 FROM
+ (SELECT * FROM src WHERE src.key < 10) src1
+ JOIN
+ (SELECT * FROM src WHERE src.key < 10) src2
+ SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 10) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 10) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+ sort order: ++++
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: EXPLAIN FORMATTED
+SELECT src1.key as k1, src1.value as v1,
+ src2.key as k2, src2.value as v2 FROM
+ (SELECT * FROM src WHERE src.key < 10) src1
+ JOIN
+ (SELECT * FROM src WHERE src.key < 10) src2
+ SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FORMATTED
+SELECT src1.key as k1, src1.value as v1,
+ src2.key as k2, src2.value as v2 FROM
+ (SELECT * FROM src WHERE src.key < 10) src1
+ JOIN
+ (SELECT * FROM src WHERE src.key < 10) src2
+ SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+#### A masked pattern was here ####
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: SELECT src1.key as k1, src1.value as v1,
+ src2.key as k2, src2.value as v2 FROM
+ (SELECT * FROM src WHERE src.key < 10) src1
+ JOIN
+ (SELECT * FROM src WHERE src.key < 10) src2
+ SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src1.key as k1, src1.value as v1,
+ src2.key as k2, src2.value as v2 FROM
+ (SELECT * FROM src WHERE src.key < 10) src1
+ JOIN
+ (SELECT * FROM src WHERE src.key < 10) src2
+ SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 0 val_0
+0 val_0 2 val_2
+0 val_0 2 val_2
+0 val_0 2 val_2
+0 val_0 4 val_4
+0 val_0 4 val_4
+0 val_0 4 val_4
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 5 val_5
+0 val_0 8 val_8
+0 val_0 8 val_8
+0 val_0 8 val_8
+0 val_0 9 val_9
+0 val_0 9 val_9
+0 val_0 9 val_9
+2 val_2 0 val_0
+2 val_2 0 val_0
+2 val_2 0 val_0
+2 val_2 2 val_2
+2 val_2 4 val_4
+2 val_2 5 val_5
+2 val_2 5 val_5
+2 val_2 5 val_5
+2 val_2 8 val_8
+2 val_2 9 val_9
+4 val_4 0 val_0
+4 val_4 0 val_0
+4 val_4 0 val_0
+4 val_4 2 val_2
+4 val_4 4 val_4
+4 val_4 5 val_5
+4 val_4 5 val_5
+4 val_4 5 val_5
+4 val_4 8 val_8
+4 val_4 9 val_9
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 0 val_0
+5 val_5 2 val_2
+5 val_5 2 val_2
+5 val_5 2 val_2
+5 val_5 4 val_4
+5 val_5 4 val_4
+5 val_5 4 val_4
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 8 val_8
+5 val_5 8 val_8
+5 val_5 8 val_8
+5 val_5 9 val_9
+5 val_5 9 val_9
+5 val_5 9 val_9
+8 val_8 0 val_0
+8 val_8 0 val_0
+8 val_8 0 val_0
+8 val_8 2 val_2
+8 val_8 4 val_4
+8 val_8 5 val_5
+8 val_8 5 val_5
+8 val_8 5 val_5
+8 val_8 8 val_8
+8 val_8 9 val_9
+9 val_9 0 val_0
+9 val_9 0 val_0
+9 val_9 0 val_0
+9 val_9 2 val_2
+9 val_9 4 val_4
+9 val_9 5 val_5
+9 val_9 5 val_5
+9 val_9 5 val_5
+9 val_9 8 val_8
+9 val_9 9 val_9
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join1.q.out b/ql/src/test/results/clientpositive/llap/join1.q.out
new file mode 100644
index 0000000..f719181
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/join1.q.out
@@ -0,0 +1,1158 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_j1
+PREHOOK: query: EXPLAIN
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col2) (type: int), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_j1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_j1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest_j1
+POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest_j1.* FROM dest_j1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest_j1.* FROM dest_j1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+104 val_104
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+113 val_113
+113 val_113
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+118 val_118
+118 val_118
+118 val_118
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+12 val_12
+12 val_12
+12 val_12
+12 val_12
+120 val_120
+120 val_120
+120 val_120
+120 val_120
+125 val_125
+125 val_125
+125 val_125
+125 val_125
+126 val_126
+128 val_128
+128 val_128
+128 val_128
+128 val_128
+128 val_128
+128 val_128
+128 val_128
+128 val_128
+128 val_128
+129 val_129
+129 val_129
+129 val_129
+129 val_129
+131 val_131
+133 val_133
+134 val_134
+134 val_134
+134 val_134
+134 val_134
+136 val_136
+137 val_137
+137 val_137
+137 val_137
+137 val_137
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+143 val_143
+145 val_145
+146 val_146
+146 val_146
+146 val_146
+146 val_146
+149 val_149
+149 val_149
+149 val_149
+149 val_149
+15 val_15
+15 val_15
+15 val_15
+15 val_15
+150 val_150
+152 val_152
+152 val_152
+152 val_152
+152 val_152
+153 val_153
+155 val_155
+156 val_156
+157 val_157
+158 val_158
+160 val_160
+162 val_162
+163 val_163
+164 val_164
+164 val_164
+164 val_164
+164 val_164
+165 val_165
+165 val_165
+165 val_165
+165 val_165
+166 val_166
+167 val_167
+167 val_167
+167 val_167
+167 val_167
+167 val_167
+167 val_167
+167 val_167
+167 val_167
+167 val_167
+168 val_168
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+17 val_17
+170 val_170
+172 val_172
+172 val_172
+172 val_172
+172 val_172
+174 val_174
+174 val_174
+174 val_174
+174 val_174
+175 val_175
+175 val_175
+175 val_175
+175 val_175
+176 val_176
+176 val_176
+176 val_176
+176 val_176
+177 val_177
+178 val_178
+179 val_179
+179 val_179
+179 val_179
+179 val_179
+18 val_18
+18 val_18
+18 val_18
+18 val_18
+180 val_180
+181 val_181
+183 val_183
+186 val_186
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+189 val_189
+19 val_19
+190 val_190
+191 val_191
+191 val_191
+191 val_191
+191 val_191
+192 val_192
+193 val_193
+193 val_193
+193 val_193
+193 val_193
+193 val_193
+193 val_193
+193 val_193
+193 val_193
+193 val_193
+194 val_194
+195 val_195
+195 val_195
+195 val_195
+195 val_195
+196 val_196
+197 val_197
+197 val_197
+197 val_197
+197 val_197
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+2 val_2
+20 val_20
+200 val_200
+200 val_200
+200 val_200
+200 val_200
+201 val_201
+202 val_202
+203 val_203
+203 val_203
+203 val_203
+203 val_203
+205 val_205
+205 val_205
+205 val_205
+205 val_205
+207 val_207
+207 val_207
+207 val_207
+207 val_207
+208 val_208
+208 val_208
+208 val_208
+208 val_208
+208 val_208
+208 val_208
+208 val_208
+208 val_208
+208 val_208
+209 val_209
+209 val_209
+209 val_209
+209 val_209
+213 val_213
+213 val_213
+213 val_213
+213 val_213
+214 val_214
+216 val_216
+216 val_216
+216 val_216
+216 val_216
+217 val_217
+217 val_217
+217 val_217
+217 val_217
+218 val_218
+219 val_219
+219 val_219
+219 val_219
+219 val_219
+221 val_221
+221 val_221
+221 val_221
+221 val_221
+222 val_222
+223 val_223
+223 val_223
+223 val_223
+223 val_223
+224 val_224
+224 val_224
+224 val_224
+224 val_224
+226 val_226
+228 val_228
+229 val_229
+229 val_229
+229 val_229
+229 val_229
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+233 val_233
+233 val_233
+233 val_233
+233 val_233
+235 val_235
+237 val_237
+237 val_237
+237 val_237
+237 val_237
+238 val_238
+238 val_238
+238 val_238
+238 val_238
+239 val_239
+239 val_239
+239 val_239
+239 val_239
+24 val_24
+24 val_24
+24 val_24
+24 val_24
+241 val_241
+242 val_242
+242 val_242
+242 val_242
+242 val_242
+244 val_244
+247 val_247
+248 val_248
+249 val_249
+252 val_252
+255 val_255
+255 val_255
+255 val_255
+255 val_255
+256 val_256
+256 val_256
+256 val_256
+256 val_256
+257 val_257
+258 val_258
+26 val_26
+26 val_26
+26 val_26
+26 val_26
+260 val_260
+262 val_262
+263 val_263
+265 val_265
+265 val_265
+265 val_265
+265 val_265
+266 val_266
+27 val_27
+272 val_272
+272 val_272
+272 val_272
+272 val_272
+273 val_273
+273 val_273
+273 val_273
+273 val_273
+273 val_273
+273 val_273
+273 val_273
+273 val_273
+273 val_273
+274 val_274
+275 val_275
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+277 val_277
+278 val_278
+278 val_278
+278 val_278
+278 val_278
+28 val_28
+280 val_280
+280 val_280
+280 val_280
+280 val_280
+281 val_281
+281 val_281
+281 val_281
+281 val_281
+282 val_282
+282 val_282
+282 val_282
+282 val_282
+283 val_283
+284 val_284
+285 val_285
+286 val_286
+287 val_287
+288 val_288
+288 val_288
+288 val_288
+288 val_288
+289 val_289
+291 val_291
+292 val_292
+296 val_296
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+30 val_30
+302 val_302
+305 val_305
+306 val_306
+307 val_307
+307 val_307
+307 val_307
+307 val_307
+308 val_308
+309 val_309
+309 val_309
+309 val_309
+309 val_309
+310 val_310
+311 val_311
+311 val_311
+311 val_311
+311 val_311
+311 val_311
+311 val_311
+311 val_311
+311 val_311
+311 val_311
+315 val_315
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+317 val_317
+317 val_317
+317 val_317
+317 val_317
+318 val_318
+318 val_318
+318 val_318
+318 val_318
+318 val_318
+318 val_318
+318 val_318
+318 val_318
+318 val_318
+321 val_321
+321 val_321
+321 val_321
+321 val_321
+322 val_322
+322 val_322
+322 val_322
+322 val_322
+323 val_323
+325 val_325
+325 val_325
+325 val_325
+325 val_325
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+33 val_33
+331 val_331
+331 val_331
+331 val_331
+331 val_331
+332 val_332
+333 val_333
+333 val_333
+333 val_333
+333 val_333
+335 val_335
+336 val_336
+338 val_338
+339 val_339
+34 val_34
+341 val_341
+342 val_342
+342 val_342
+342 val_342
+342 val_342
+344 val_344
+344 val_344
+344 val_344
+344 val_344
+345 val_345
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+35 val_35
+35 val_35
+35 val_35
+35 val_35
+35 val_35
+35 val_35
+35 val_35
+35 val_35
+35 val_35
+351 val_351
+353 val_353
+353 val_353
+353 val_353
+353 val_353
+356 val_356
+360 val_360
+362 val_362
+364 val_364
+365 val_365
+366 val_366
+367 val_367
+367 val_367
+367 val_367
+367 val_367
+368 val_368
+369 val_369
+369 val_369
+369 val_369
+369 val_369
+369 val_369
+369 val_369
+369 val_369
+369 val_369
+369 val_369
+37 val_37
+37 val_37
+37 val_37
+37 val_37
+373 val_373
+374 val_374
+375 val_375
+377 val_377
+378 val_378
+379 val_379
+382 val_382
+382 val_382
+382 val_382
+382 val_382
+384 val_384
+384 val_384
+384 val_384
+384 val_384
+384 val_384
+384 val_384
+384 val_384
+384 val_384
+384 val_384
+386 val_386
+389 val_389
+392 val_392
+393 val_393
+394 val_394
+395 val_395
+395 val_395
+395 val_395
+395 val_395
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+397 val_397
+397 val_397
+397 val_397
+397 val_397
+399 val_399
+399 val_399
+399 val_399
+399 val_399
+4 val_4
+400 val_400
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+402 val_402
+403 val_403
+403 val_403
+403 val_403
+403 val_403
+403 val_403
+403 val_403
+403 val_403
+403 val_403
+403 val_403
+404 val_404
+404 val_404
+404 val_404
+404 val_404
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+407 val_407
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+41 val_41
+411 val_411
+413 val_413
+413 val_413
+413 val_413
+413 val_413
+414 val_414
+414 val_414
+414 val_414
+414 val_414
+417 val_417
+417 val_417
+417 val_417
+417 val_417
+417 val_417
+417 val_417
+417 val_417
+417 val_417
+417 val_417
+418 val_418
+419 val_419
+42 val_42
+42 val_42
+42 val_42
+42 val_42
+421 val_421
+424 val_424
+424 val_424
+424 val_424
+424 val_424
+427 val_427
+429 val_429
+429 val_429
+429 val_429
+429 val_429
+43 val_43
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+431 val_431
+431 val_431
+431 val_431
+431 val_431
+431 val_431
+431 val_431
+431 val_431
+431 val_431
+431 val_431
+432 val_432
+435 val_435
+436 val_436
+437 val_437
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+439 val_439
+439 val_439
+439 val_439
+439 val_439
+44 val_44
+443 val_443
+444 val_444
+446 val_446
+448 val_448
+449 val_449
+452 val_452
+453 val_453
+454 val_454
+454 val_454
+454 val_454
+454 val_454
+454 val_454
+454 val_454
+454 val_454
+454 val_454
+454 val_454
+455 val_455
+457 val_457
+458 val_458
+458 val_458
+458 val_458
+458 val_458
+459 val_459
+459 val_459
+459 val_459
+459 val_459
+460 val_460
+462 val_462
+462 val_462
+462 val_462
+462 val_462
+463 val_463
+463 val_463
+463 val_463
+463 val_463
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+467 val_467
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+469 val_469
+47 val_47
+470 val_470
+472 val_472
+475 val_475
+477 val_477
+478 val_478
+478 val_478
+478 val_478
+478 val_478
+479 val_479
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+481 val_481
+482 val_482
+483 val_483
+484 val_484
+485 val_485
+487 val_487
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+490 val_490
+491 val_491
+492 val_492
+492 val_492
+492 val_492
+492 val_492
+493 val_493
+494 val_494
+495 val_495
+496 val_496
+497 val_497
+498 val_498
+498 val_498
+498 val_498
+498 val_498
+498 val_498
+498 val_498
+498 val_498
+498 val_498
+498 val_498
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+51 val_51
+51 val_51
+51 val_51
+51 val_51
+53 val_53
+54 val_54
+57 val_57
+58 val_58
+58 val_58
+58 val_58
+58 val_58
+64 val_64
+65 val_65
+66 val_66
+67 val_67
+67 val_67
+67 val_67
+67 val_67
+69 val_69
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+72 val_72
+72 val_72
+72 val_72
+72 val_72
+74 val_74
+76 val_76
+76 val_76
+76 val_76
+76 val_76
+77 val_77
+78 val_78
+8 val_8
+80 val_80
+82 val_82
+83 val_83
+83 val_83
+83 val_83
+83 val_83
+84 val_84
+84 val_84
+84 val_84
+84 val_84
+85 val_85
+86 val_86
+87 val_87
+9 val_9
+90 val_90
+90 val_90
+90 val_90
+90 val_90
+90 val_90
+90 val_90
+90 val_90
+90 val_90
+90 val_90
+92 val_92
+95 val_95
+95 val_95
+95 val_95
+95 val_95
+96 val_96
+97 val_97
+97 val_97
+97 val_97
+97 val_97
+98 val_98
+98 val_98
+98 val_98
+98 val_98
[24/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/explainuser_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_3.q.out b/ql/src/test/results/clientpositive/llap/explainuser_3.q.out
new file mode 100644
index 0000000..79c7116
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/explainuser_3.q.out
@@ -0,0 +1,522 @@
+PREHOOK: query: explain select key, value
+FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, value
+FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol
+POSTHOOK: type: QUERY
+Plan not optimized by CBO.
+
+Stage-0
+ Fetch Operator
+ limit:-1
+ Select Operator [SEL_6]
+ outputColumnNames:["_col0","_col1"]
+ Lateral View Join Operator [LVJ_5]
+ outputColumnNames:["_col0","_col1","_col7"]
+ Select Operator [SEL_2]
+ outputColumnNames:["key","value"]
+ Lateral View Forward [LVF_1]
+ TableScan [TS_0]
+ alias:srcpart
+ Select Operator [SEL_6]
+ outputColumnNames:["_col0","_col1"]
+ Lateral View Join Operator [LVJ_5]
+ outputColumnNames:["_col0","_col1","_col7"]
+ UDTF Operator [UDTF_4]
+ function name:explode
+ Select Operator [SEL_3]
+ outputColumnNames:["_col0"]
+ Please refer to the previous Lateral View Forward [LVF_1]
+
+PREHOOK: query: explain show tables
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: explain show tables
+POSTHOOK: type: SHOWTABLES
+Stage-1
+ Fetch Operator
+ limit:-1
+ Stage-0
+ Show Table Operator:
+ database name:default
+
+#### A masked pattern was here ####
+PREHOOK: type: CREATEDATABASE
+#### A masked pattern was here ####
+POSTHOOK: type: CREATEDATABASE
+Stage-0
+
+#### A masked pattern was here ####
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:newDB
+#### A masked pattern was here ####
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:newDB
+#### A masked pattern was here ####
+PREHOOK: query: explain describe database extended newDB
+PREHOOK: type: DESCDATABASE
+POSTHOOK: query: explain describe database extended newDB
+POSTHOOK: type: DESCDATABASE
+Stage-1
+ Fetch Operator
+ limit:-1
+ Stage-0
+
+PREHOOK: query: describe database extended newDB
+PREHOOK: type: DESCDATABASE
+PREHOOK: Input: database:newdb
+POSTHOOK: query: describe database extended newDB
+POSTHOOK: type: DESCDATABASE
+POSTHOOK: Input: database:newdb
+newdb location/in/test hive_test_user USER
+PREHOOK: query: explain use newDB
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: explain use newDB
+POSTHOOK: type: SWITCHDATABASE
+Stage-0
+
+PREHOOK: query: use newDB
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:newdb
+POSTHOOK: query: use newDB
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:newdb
+PREHOOK: query: create table tab (name string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:newdb
+PREHOOK: Output: newDB@tab
+POSTHOOK: query: create table tab (name string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:newdb
+POSTHOOK: Output: newDB@tab
+PREHOOK: query: explain alter table tab rename to newName
+PREHOOK: type: ALTERTABLE_RENAME
+POSTHOOK: query: explain alter table tab rename to newName
+POSTHOOK: type: ALTERTABLE_RENAME
+Stage-0
+ Alter Table Operator:
+ new name:newDB.newName
+ old name:newDB.tab
+ type:rename
+
+PREHOOK: query: explain drop table tab
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: explain drop table tab
+POSTHOOK: type: DROPTABLE
+Stage-0
+ Drop Table Operator:
+ table:tab
+
+PREHOOK: query: drop table tab
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: newdb@tab
+PREHOOK: Output: newdb@tab
+POSTHOOK: query: drop table tab
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: newdb@tab
+POSTHOOK: Output: newdb@tab
+PREHOOK: query: explain use default
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: explain use default
+POSTHOOK: type: SWITCHDATABASE
+Stage-0
+
+PREHOOK: query: use default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: use default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: drop database newDB
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:newdb
+PREHOOK: Output: database:newdb
+POSTHOOK: query: drop database newDB
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:newdb
+POSTHOOK: Output: database:newdb
+PREHOOK: query: explain analyze table src compute statistics
+PREHOOK: type: QUERY
+POSTHOOK: query: explain analyze table src compute statistics
+POSTHOOK: type: QUERY
+Stage-2
+ Stats-Aggr Operator
+ Stage-0
+ Map 1
+ TableScan [TS_0]
+ alias:src
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+
+PREHOOK: query: explain analyze table src compute statistics for columns
+PREHOOK: type: QUERY
+POSTHOOK: query: explain analyze table src compute statistics for columns
+POSTHOOK: type: QUERY
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+
+Stage-2
+ Column Stats Work{}
+ Stage-0
+ Reducer 2
+ File Output Operator [FS_6]
+ compressed:false
+ Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
+ Group By Operator [GBY_4]
+ | aggregations:["compute_stats(VALUE._col0)","compute_stats(VALUE._col1)"]
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ |<-Map 1 [SIMPLE_EDGE]
+ Reduce Output Operator [RS_3]
+ sort order:
+ Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ value expressions:_col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+ Group By Operator [GBY_2]
+ aggregations:["compute_stats(key, 16)","compute_stats(value, 16)"]
+ outputColumnNames:["_col0","_col1"]
+ Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+ Select Operator [SEL_1]
+ outputColumnNames:["key","value"]
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TableScan [TS_0]
+ alias:src
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
+PREHOOK: query: explain
+CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x))
+PREHOOK: type: CREATEMACRO
+POSTHOOK: query: explain
+CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x))
+POSTHOOK: type: CREATEMACRO
+Stage-0
+
+PREHOOK: query: CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x))
+PREHOOK: type: CREATEMACRO
+PREHOOK: Output: database:default
+POSTHOOK: query: CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x))
+POSTHOOK: type: CREATEMACRO
+POSTHOOK: Output: database:default
+PREHOOK: query: EXPLAIN SELECT SIGMOID(2) FROM src LIMIT 1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT SIGMOID(2) FROM src LIMIT 1
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Stage-0
+ Fetch Operator
+ limit:1
+ Limit [LIM_2]
+ Number of rows:1
+ Select Operator [SEL_1]
+ outputColumnNames:["_col0"]
+ TableScan [TS_0]
+ alias:src
+
+PREHOOK: query: explain DROP TEMPORARY MACRO SIGMOID
+PREHOOK: type: DROPMACRO
+POSTHOOK: query: explain DROP TEMPORARY MACRO SIGMOID
+POSTHOOK: type: DROPMACRO
+Stage-0
+
+PREHOOK: query: DROP TEMPORARY MACRO SIGMOID
+PREHOOK: type: DROPMACRO
+PREHOOK: Output: database:default
+POSTHOOK: query: DROP TEMPORARY MACRO SIGMOID
+POSTHOOK: type: DROPMACRO
+POSTHOOK: Output: database:default
+PREHOOK: query: explain create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: explain create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+Plan optimized by CBO.
+
+Stage-3
+ Stats-Aggr Operator
+ Stage-4
+ Create Table Operator:
+ columns:["key string","value string"]
+ input format:org.apache.hadoop.mapred.TextInputFormat
+ name:default.src_autho_test
+ output format:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ Stage-2
+ Dependency Collection{}
+ Stage-1
+ Map 1
+ File Output Operator [FS_2]
+ compressed:false
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.src_autho_test"}
+ Select Operator [SEL_1]
+ outputColumnNames:["_col0","_col1"]
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TableScan [TS_0]
+ alias:src
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Stage-0
+ Move Operator
+ Please refer to the previous Stage-1
+
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: explain grant select on table src_autho_test to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: query: explain grant select on table src_autho_test to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+Stage-0
+
+PREHOOK: query: grant select on table src_autho_test to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: grant select on table src_autho_test to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: explain show grant user hive_test_user on table src_autho_test
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: explain show grant user hive_test_user on table src_autho_test
+POSTHOOK: type: SHOW_GRANT
+Stage-1
+ Fetch Operator
+ limit:-1
+ Stage-0
+
+PREHOOK: query: explain show grant user hive_test_user on table src_autho_test(key)
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: explain show grant user hive_test_user on table src_autho_test(key)
+POSTHOOK: type: SHOW_GRANT
+Stage-1
+ Fetch Operator
+ limit:-1
+ Stage-0
+
+PREHOOK: query: select key from src_autho_test order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_autho_test
+#### A masked pattern was here ####
+POSTHOOK: query: select key from src_autho_test order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_autho_test
+#### A masked pattern was here ####
+0
+0
+0
+10
+100
+100
+103
+103
+104
+104
+105
+11
+111
+113
+113
+114
+116
+118
+118
+119
+PREHOOK: query: explain revoke select on table src_autho_test from user hive_test_user
+PREHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: query: explain revoke select on table src_autho_test from user hive_test_user
+POSTHOOK: type: REVOKE_PRIVILEGE
+Stage-0
+
+PREHOOK: query: explain grant select(key) on table src_autho_test to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: query: explain grant select(key) on table src_autho_test to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+Stage-0
+
+PREHOOK: query: explain revoke select(key) on table src_autho_test from user hive_test_user
+PREHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: query: explain revoke select(key) on table src_autho_test from user hive_test_user
+POSTHOOK: type: REVOKE_PRIVILEGE
+Stage-0
+
+PREHOOK: query: explain
+create role sRc_roLE
+PREHOOK: type: CREATEROLE
+POSTHOOK: query: explain
+create role sRc_roLE
+POSTHOOK: type: CREATEROLE
+Stage-0
+
+PREHOOK: query: create role sRc_roLE
+PREHOOK: type: CREATEROLE
+POSTHOOK: query: create role sRc_roLE
+POSTHOOK: type: CREATEROLE
+PREHOOK: query: explain
+grant role sRc_roLE to user hive_test_user
+PREHOOK: type: GRANT_ROLE
+POSTHOOK: query: explain
+grant role sRc_roLE to user hive_test_user
+POSTHOOK: type: GRANT_ROLE
+Stage-0
+
+PREHOOK: query: grant role sRc_roLE to user hive_test_user
+PREHOOK: type: GRANT_ROLE
+POSTHOOK: query: grant role sRc_roLE to user hive_test_user
+POSTHOOK: type: GRANT_ROLE
+PREHOOK: query: explain show role grant user hive_test_user
+PREHOOK: type: SHOW_ROLE_GRANT
+POSTHOOK: query: explain show role grant user hive_test_user
+POSTHOOK: type: SHOW_ROLE_GRANT
+Stage-1
+ Fetch Operator
+ limit:-1
+ Stage-0
+
+PREHOOK: query: explain drop role sRc_roLE
+PREHOOK: type: DROPROLE
+POSTHOOK: query: explain drop role sRc_roLE
+POSTHOOK: type: DROPROLE
+Stage-0
+
+PREHOOK: query: drop role sRc_roLE
+PREHOOK: type: DROPROLE
+POSTHOOK: query: drop role sRc_roLE
+POSTHOOK: type: DROPROLE
+PREHOOK: query: drop table src_autho_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: drop table src_autho_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: explain drop view v
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: explain drop view v
+POSTHOOK: type: DROPVIEW
+Stage-0
+ Drop Table Operator:
+ table:v
+
+PREHOOK: query: explain create view v as with cte as (select * from src order by key limit 5)
+select * from cte
+PREHOOK: type: CREATEVIEW
+POSTHOOK: query: explain create view v as with cte as (select * from src order by key limit 5)
+select * from cte
+POSTHOOK: type: CREATEVIEW
+Plan not optimized by CBO.
+
+Stage-0
+ Create View Operator:
+ name:default.v
+ original text:with cte as (select * from src order by key limit 5)
+select * from cte
+
+PREHOOK: query: explain with cte as (select * from src order by key limit 5)
+select * from cte
+PREHOOK: type: QUERY
+POSTHOOK: query: explain with cte as (select * from src order by key limit 5)
+select * from cte
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+
+Stage-0
+ Fetch Operator
+ limit:5
+ Stage-1
+ Reducer 2
+ File Output Operator [FS_5]
+ compressed:false
+ Statistics:Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
+ Limit [LIM_4]
+ Number of rows:5
+ Statistics:Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+ Select Operator [SEL_3]
+ | outputColumnNames:["_col0","_col1"]
+ | Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ |<-Map 1 [SIMPLE_EDGE]
+ Reduce Output Operator [RS_2]
+ key expressions:_col0 (type: string)
+ sort order:+
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions:_col1 (type: string)
+ Select Operator [SEL_1]
+ outputColumnNames:["_col0","_col1"]
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ TableScan [TS_0]
+ alias:src
+ Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
+PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: explain insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Stage-3
+ Stats-Aggr Operator
+ Stage-0
+ Move Operator
+ table:{"input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat","serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.orc_merge5"}
+ Stage-2
+ Dependency Collection{}
+ Stage-5(CONDITIONAL)
+ Move Operator
+ Stage-8(CONDITIONAL CHILD TASKS: Stage-5, Stage-4, Stage-6)
+ Conditional Operator
+ Stage-1
+ Map 1
+ File Output Operator [FS_3]
+ compressed:false
+ Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:{"input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat","serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.orc_merge5"}
+ Select Operator [SEL_2]
+ outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
+ Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator [FIL_4]
+ predicate:(userid <= 13) (type: boolean)
+ Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ TableScan [TS_0]
+ alias:orc_merge5
+ Statistics:Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Stage-4(CONDITIONAL)
+ File Merge
+ ORC File Merge Operator [OFM_7]
+ Please refer to the previous Stage-8(CONDITIONAL CHILD TASKS: Stage-5, Stage-4, Stage-6)
+ Stage-7
+ Move Operator
+ Stage-6(CONDITIONAL)
+ File Merge
+ ORC File Merge Operator [OFM_7]
+ Please refer to the previous Stage-8(CONDITIONAL CHILD TASKS: Stage-5, Stage-4, Stage-6)
+
+PREHOOK: query: drop table orc_merge5
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: drop table orc_merge5
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out b/ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out
new file mode 100644
index 0000000..b8e738c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out
@@ -0,0 +1,445 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@filter_join_breaktask
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@filter_join_breaktask
+PREHOOK: query: INSERT OVERWRITE TABLE filter_join_breaktask PARTITION(ds='2008-04-08')
+SELECT key, value from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@filter_join_breaktask@ds=2008-04-08
+POSTHOOK: query: INSERT OVERWRITE TABLE filter_join_breaktask PARTITION(ds='2008-04-08')
+SELECT key, value from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@filter_join_breaktask@ds=2008-04-08
+POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).key EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: filter_join_breaktask PARTITION(ds=2008-04-08).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN EXTENDED
+SELECT f.key, g.value
+FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null)
+JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='')
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+SELECT f.key, g.value
+FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null)
+JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='')
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ filter_join_breaktask
+ f
+ TOK_TABREF
+ TOK_TABNAME
+ filter_join_breaktask
+ m
+ AND
+ AND
+ AND
+ =
+ .
+ TOK_TABLE_OR_COL
+ f
+ key
+ .
+ TOK_TABLE_OR_COL
+ m
+ key
+ =
+ .
+ TOK_TABLE_OR_COL
+ f
+ ds
+ '2008-04-08'
+ =
+ .
+ TOK_TABLE_OR_COL
+ m
+ ds
+ '2008-04-08'
+ TOK_FUNCTION
+ TOK_ISNOTNULL
+ .
+ TOK_TABLE_OR_COL
+ f
+ key
+ TOK_TABREF
+ TOK_TABNAME
+ filter_join_breaktask
+ g
+ AND
+ AND
+ AND
+ AND
+ =
+ .
+ TOK_TABLE_OR_COL
+ g
+ value
+ .
+ TOK_TABLE_OR_COL
+ m
+ value
+ =
+ .
+ TOK_TABLE_OR_COL
+ g
+ ds
+ '2008-04-08'
+ =
+ .
+ TOK_TABLE_OR_COL
+ m
+ ds
+ '2008-04-08'
+ TOK_FUNCTION
+ TOK_ISNOTNULL
+ .
+ TOK_TABLE_OR_COL
+ m
+ value
+ !=
+ .
+ TOK_TABLE_OR_COL
+ m
+ value
+ ''
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ .
+ TOK_TABLE_OR_COL
+ f
+ key
+ TOK_SELEXPR
+ .
+ TOK_TABLE_OR_COL
+ g
+ value
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: f
+ Statistics: Num rows: 25 Data size: 211 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 109 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 13 Data size: 109 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.filter_join_breaktask
+ numFiles 1
+ numRows 25
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 211
+ serialization.ddl struct filter_join_breaktask { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 236
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.filter_join_breaktask
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct filter_join_breaktask { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.filter_join_breaktask
+ name: default.filter_join_breaktask
+ Truncated Path -> Alias:
+ /filter_join_breaktask/ds=2008-04-08 [f]
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: m
+ Statistics: Num rows: 25 Data size: 211 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: ((key is not null and value is not null) and (value <> '')) (type: boolean)
+ Statistics: Num rows: 7 Data size: 59 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 7 Data size: 59 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ value expressions: value (type: string)
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.filter_join_breaktask
+ numFiles 1
+ numRows 25
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 211
+ serialization.ddl struct filter_join_breaktask { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 236
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.filter_join_breaktask
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct filter_join_breaktask { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.filter_join_breaktask
+ name: default.filter_join_breaktask
+ Truncated Path -> Alias:
+ /filter_join_breaktask/ds=2008-04-08 [m]
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: g
+ Statistics: Num rows: 25 Data size: 211 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: ((value <> '') and value is not null) (type: boolean)
+ Statistics: Num rows: 13 Data size: 109 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 13 Data size: 109 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.filter_join_breaktask
+ numFiles 1
+ numRows 25
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 211
+ serialization.ddl struct filter_join_breaktask { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 236
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.filter_join_breaktask
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct filter_join_breaktask { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.filter_join_breaktask
+ name: default.filter_join_breaktask
+ Truncated Path -> Alias:
+ /filter_join_breaktask/ds=2008-04-08 [g]
+ Reducer 2
+ Execution mode: llap
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col7
+ Position of Big Table: 0
+ Statistics: Num rows: 14 Data size: 119 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col7 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col7 (type: string)
+ Statistics: Num rows: 14 Data size: 119 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ value expressions: _col0 (type: int)
+ auto parallelism: true
+ Reducer 3
+ Execution mode: llap
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col7 (type: string)
+ 1 value (type: string)
+ outputColumnNames: _col0, _col13
+ Position of Big Table: 0
+ Statistics: Num rows: 15 Data size: 130 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col13 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 15 Data size: 130 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 15 Data size: 130 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0,_col1
+ columns.types int:string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT f.key, g.value
+FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null)
+JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@filter_join_breaktask
+PREHOOK: Input: default@filter_join_breaktask@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT f.key, g.value
+FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null)
+JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND m.ds='2008-04-08' AND m.value is not null AND m.value !='')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@filter_join_breaktask
+POSTHOOK: Input: default@filter_join_breaktask@ds=2008-04-08
+#### A masked pattern was here ####
+146 val_146
+150 val_150
+213 val_213
+238 val_238
+255 val_255
+273 val_273
+278 val_278
+311 val_311
+401 val_401
+406 val_406
+66 val_66
+98 val_98
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/filter_join_breaktask2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/filter_join_breaktask2.q.out b/ql/src/test/results/clientpositive/llap/filter_join_breaktask2.q.out
new file mode 100644
index 0000000..af85af9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/filter_join_breaktask2.q.out
@@ -0,0 +1,272 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string)
+partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string)
+partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T1
+PREHOOK: query: create table T2(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T2
+POSTHOOK: query: create table T2(c1 string, c2 string, c3 string, c0 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T2
+PREHOOK: query: create table T3 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T3
+POSTHOOK: query: create table T3 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T3
+PREHOOK: query: create table T4 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c8
1 bigint, c82 bigint, c83 bigint) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@T4
+POSTHOOK: query: create table T4 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c
81 bigint, c82 bigint, c83 bigint) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@T4
+PREHOOK: query: insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1@ds=2010-04-17
+POSTHOOK: query: insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1@ds=2010-04-17
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c1 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c2 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c3 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c4 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c5 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c6 SIMPLE []
+POSTHOOK: Lineage: t1 PARTITION(ds=2010-04-17).c7 SIMPLE []
+PREHOOK: query: insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t2@ds=2010-04-17
+POSTHOOK: query: insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t2@ds=2010-04-17
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c0 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c1 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c10 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c11 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c12 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c13 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c14 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c15 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c16 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c17 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c18 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c19 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c2 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c20 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c21 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c22 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c23 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c24 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c25 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c3 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c4 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c5 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c6 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c7 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c8 SIMPLE []
+POSTHOOK: Lineage: t2 PARTITION(ds=2010-04-17).c9 SIMPLE []
+PREHOOK: query: insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t3@ds=2010-04-17
+POSTHOOK: query: insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t3@ds=2010-04-17
+POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c0 EXPRESSION []
+POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c1 EXPRESSION []
+POSTHOOK: Lineage: t3 PARTITION(ds=2010-04-17).c2 SIMPLE []
+PREHOOK: query: insert overwrite table T4 partition(ds='2010-04-17')
+select 4,'1','1','8','4','5','1','0','9','U','2','2', '0','2','1','1','J','C','A','U', '2','s', '2',NULL, NULL, NULL,NULL, NULL, NULL,'1','j', 'S', '6',NULL,'1', '2', 'J', 'g', '1', 'e', '2', '1', '2', 'U', 'P', 'p', '3', '0', '0', '0', '1', '1', '1', '0', '0', '0', '6', '2', 'j',NULL, NULL, NULL,NULL,NULL, NULL, '5',NULL, 'j', 'j', 2, 2, 1, '2', '2', '1', '1', '1', '1', '1', '1', 1, 1, 32,NULL from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t4@ds=2010-04-17
+POSTHOOK: query: insert overwrite table T4 partition(ds='2010-04-17')
+select 4,'1','1','8','4','5','1','0','9','U','2','2', '0','2','1','1','J','C','A','U', '2','s', '2',NULL, NULL, NULL,NULL, NULL, NULL,'1','j', 'S', '6',NULL,'1', '2', 'J', 'g', '1', 'e', '2', '1', '2', 'U', 'P', 'p', '3', '0', '0', '0', '1', '1', '1', '0', '0', '0', '6', '2', 'j',NULL, NULL, NULL,NULL,NULL, NULL, '5',NULL, 'j', 'j', 2, 2, 1, '2', '2', '1', '1', '1', '1', '1', '1', 1, 1, 32,NULL from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t4@ds=2010-04-17
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c0 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c1 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c10 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c11 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c12 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c13 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c14 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c15 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c16 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c17 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c18 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c19 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c2 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c20 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c21 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c22 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c23 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c24 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c25 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c26 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c27 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c28 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c29 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c3 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c30 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c31 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c32 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c33 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c34 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c35 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c36 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c37 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c38 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c39 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c4 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c40 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c41 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c42 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c43 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c44 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c45 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c46 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c47 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c48 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c49 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c5 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c50 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c51 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c52 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c53 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c54 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c55 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c56 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c57 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c58 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c59 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c6 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c60 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c61 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c62 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c63 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c64 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c65 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c66 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c67 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c68 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c69 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c7 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c70 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c71 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c72 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c73 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c74 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c75 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c76 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c77 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c78 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c79 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c8 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c80 SIMPLE []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c81 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c82 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c83 EXPRESSION []
+POSTHOOK: Lineage: t4 PARTITION(ds=2010-04-17).c9 SIMPLE []
+PREHOOK: query: select * from T2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@ds=2010-04-17
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@ds=2010-04-17
+#### A masked pattern was here ####
+5 name NULL 2 kavin NULL 9 c 8 0 0 7 1 2 0 3 2 NULL 1 NULL 3 2 0 0 5 10 2010-04-17
+PREHOOK: query: select * from T1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@ds=2010-04-17
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@ds=2010-04-17
+#### A masked pattern was here ####
+5 1 1 1 0 0 4 2010-04-17
+PREHOOK: query: select * from T3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t3@ds=2010-04-17
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t3@ds=2010-04-17
+#### A masked pattern was here ####
+4 5 0 2010-04-17
+PREHOOK: query: select * from T4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+PREHOOK: Input: default@t4@ds=2010-04-17
+#### A masked pattern was here ####
+POSTHOOK: query: select * from T4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t4@ds=2010-04-17
+#### A masked pattern was here ####
+4 1 1 8 4 5 1 0 9 U 2 2 0 2 1 1 J C A U 2 s 2 NULL NULL NULL NULL NULL NULL 1 j S 6 NULL 1 2 J g 1 e 2 1 2 U P p 3 0 0 0 1 1 1 0 0 0 6 2 j NULL NULL NULL NULL NULL NULL 5 NULL NULL j 2 2 1 2 2 1 1 1 1 1 1 1 1 32 NULL 2010-04-17
+WARNING: Comparing a bigint and a string may result in a loss of precision.
+PREHOOK: query: SELECT a.c1 as a_c1, b.c1 b_c1, d.c0 as d_c0
+FROM T1 a JOIN T2 b
+ ON (a.c1 = b.c1 AND a.ds='2010-04-17' AND b.ds='2010-04-17')
+ JOIN T3 c
+ ON (a.c1 = c.c1 AND a.ds='2010-04-17' AND c.ds='2010-04-17')
+ JOIN T4 d
+ ON (c.c0 = d.c0 AND c.ds='2010-04-17' AND d.ds='2010-04-17')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@ds=2010-04-17
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@ds=2010-04-17
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t3@ds=2010-04-17
+PREHOOK: Input: default@t4
+PREHOOK: Input: default@t4@ds=2010-04-17
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a.c1 as a_c1, b.c1 b_c1, d.c0 as d_c0
+FROM T1 a JOIN T2 b
+ ON (a.c1 = b.c1 AND a.ds='2010-04-17' AND b.ds='2010-04-17')
+ JOIN T3 c
+ ON (a.c1 = c.c1 AND a.ds='2010-04-17' AND c.ds='2010-04-17')
+ JOIN T4 d
+ ON (c.c0 = d.c0 AND c.ds='2010-04-17' AND d.ds='2010-04-17')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@ds=2010-04-17
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@ds=2010-04-17
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t3@ds=2010-04-17
+POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t4@ds=2010-04-17
+#### A masked pattern was here ####
+5 5 4
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/groupby1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/groupby1.q.out b/ql/src/test/results/clientpositive/llap/groupby1.q.out
new file mode 100644
index 0000000..1323a73
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/groupby1.q.out
@@ -0,0 +1,428 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_g1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_g1
+PREHOOK: query: EXPLAIN
+FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), substr(value, 5) (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: rand() (type: double)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: partial1
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: double)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: final
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), _col1 (type: double)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_g1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_g1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest_g1
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest_g1
+POSTHOOK: Lineage: dest_g1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest_g1.* FROM dest_g1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_g1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest_g1.* FROM dest_g1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_g1
+#### A masked pattern was here ####
+0 0.0
+10 10.0
+100 200.0
+103 206.0
+104 208.0
+105 105.0
+11 11.0
+111 111.0
+113 226.0
+114 114.0
+116 116.0
+118 236.0
+119 357.0
+12 24.0
+120 240.0
+125 250.0
+126 126.0
+128 384.0
+129 258.0
+131 131.0
+133 133.0
+134 268.0
+136 136.0
+137 274.0
+138 552.0
+143 143.0
+145 145.0
+146 292.0
+149 298.0
+15 30.0
+150 150.0
+152 304.0
+153 153.0
+155 155.0
+156 156.0
+157 157.0
+158 158.0
+160 160.0
+162 162.0
+163 163.0
+164 328.0
+165 330.0
+166 166.0
+167 501.0
+168 168.0
+169 676.0
+17 17.0
+170 170.0
+172 344.0
+174 348.0
+175 350.0
+176 352.0
+177 177.0
+178 178.0
+179 358.0
+18 36.0
+180 180.0
+181 181.0
+183 183.0
+186 186.0
+187 561.0
+189 189.0
+19 19.0
+190 190.0
+191 382.0
+192 192.0
+193 579.0
+194 194.0
+195 390.0
+196 196.0
+197 394.0
+199 597.0
+2 2.0
+20 20.0
+200 400.0
+201 201.0
+202 202.0
+203 406.0
+205 410.0
+207 414.0
+208 624.0
+209 418.0
+213 426.0
+214 214.0
+216 432.0
+217 434.0
+218 218.0
+219 438.0
+221 442.0
+222 222.0
+223 446.0
+224 448.0
+226 226.0
+228 228.0
+229 458.0
+230 1150.0
+233 466.0
+235 235.0
+237 474.0
+238 476.0
+239 478.0
+24 48.0
+241 241.0
+242 484.0
+244 244.0
+247 247.0
+248 248.0
+249 249.0
+252 252.0
+255 510.0
+256 512.0
+257 257.0
+258 258.0
+26 52.0
+260 260.0
+262 262.0
+263 263.0
+265 530.0
+266 266.0
+27 27.0
+272 544.0
+273 819.0
+274 274.0
+275 275.0
+277 1108.0
+278 556.0
+28 28.0
+280 560.0
+281 562.0
+282 564.0
+283 283.0
+284 284.0
+285 285.0
+286 286.0
+287 287.0
+288 576.0
+289 289.0
+291 291.0
+292 292.0
+296 296.0
+298 894.0
+30 30.0
+302 302.0
+305 305.0
+306 306.0
+307 614.0
+308 308.0
+309 618.0
+310 310.0
+311 933.0
+315 315.0
+316 948.0
+317 634.0
+318 954.0
+321 642.0
+322 644.0
+323 323.0
+325 650.0
+327 981.0
+33 33.0
+331 662.0
+332 332.0
+333 666.0
+335 335.0
+336 336.0
+338 338.0
+339 339.0
+34 34.0
+341 341.0
+342 684.0
+344 688.0
+345 345.0
+348 1740.0
+35 105.0
+351 351.0
+353 706.0
+356 356.0
+360 360.0
+362 362.0
+364 364.0
+365 365.0
+366 366.0
+367 734.0
+368 368.0
+369 1107.0
+37 74.0
+373 373.0
+374 374.0
+375 375.0
+377 377.0
+378 378.0
+379 379.0
+382 764.0
+384 1152.0
+386 386.0
+389 389.0
+392 392.0
+393 393.0
+394 394.0
+395 790.0
+396 1188.0
+397 794.0
+399 798.0
+4 4.0
+400 400.0
+401 2005.0
+402 402.0
+403 1209.0
+404 808.0
+406 1624.0
+407 407.0
+409 1227.0
+41 41.0
+411 411.0
+413 826.0
+414 828.0
+417 1251.0
+418 418.0
+419 419.0
+42 84.0
+421 421.0
+424 848.0
+427 427.0
+429 858.0
+43 43.0
+430 1290.0
+431 1293.0
+432 432.0
+435 435.0
+436 436.0
+437 437.0
+438 1314.0
+439 878.0
+44 44.0
+443 443.0
+444 444.0
+446 446.0
+448 448.0
+449 449.0
+452 452.0
+453 453.0
+454 1362.0
+455 455.0
+457 457.0
+458 916.0
+459 918.0
+460 460.0
+462 924.0
+463 926.0
+466 1398.0
+467 467.0
+468 1872.0
+469 2345.0
+47 47.0
+470 470.0
+472 472.0
+475 475.0
+477 477.0
+478 956.0
+479 479.0
+480 1440.0
+481 481.0
+482 482.0
+483 483.0
+484 484.0
+485 485.0
+487 487.0
+489 1956.0
+490 490.0
+491 491.0
+492 984.0
+493 493.0
+494 494.0
+495 495.0
+496 496.0
+497 497.0
+498 1494.0
+5 15.0
+51 102.0
+53 53.0
+54 54.0
+57 57.0
+58 116.0
+64 64.0
+65 65.0
+66 66.0
+67 134.0
+69 69.0
+70 210.0
+72 144.0
+74 74.0
+76 152.0
+77 77.0
+78 78.0
+8 8.0
+80 80.0
+82 82.0
+83 166.0
+84 168.0
+85 85.0
+86 86.0
+87 87.0
+9 9.0
+90 270.0
+92 92.0
+95 190.0
+96 96.0
+97 194.0
+98 196.0
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/groupby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/groupby2.q.out b/ql/src/test/results/clientpositive/llap/groupby2.q.out
new file mode 100644
index 0000000..94d8c81
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/groupby2.q.out
@@ -0,0 +1,133 @@
+PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_g2
+POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_g2
+PREHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: substr(key, 1, 1) (type: string), substr(value, 5) (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(DISTINCT KEY._col1:0._col0), sum(KEY._col1:0._col0)
+ keys: KEY._col0 (type: string)
+ mode: partial1
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint), _col2 (type: double)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0), sum(VALUE._col1)
+ keys: KEY._col0 (type: string)
+ mode: final
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_g2
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_g2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest_g2
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest_g2
+POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+SELECT dest_g2.* FROM dest_g2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_g2
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+SELECT dest_g2.* FROM dest_g2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_g2
+#### A masked pattern was here ####
+0 1 00.0
+1 71 116414.0
+2 69 225571.0
+3 62 332004.0
+4 74 452763.0
+5 6 5397.0
+6 5 6398.0
+7 6 7735.0
+8 8 8762.0
+9 7 91047.0
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/groupby3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/groupby3.q.out b/ql/src/test/results/clientpositive/llap/groupby3.q.out
new file mode 100644
index 0000000..75f0c36
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/groupby3.q.out
@@ -0,0 +1,158 @@
+PREHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(c1 DOUBLE, c2 DOUBLE, c3 DOUBLE, c4 DOUBLE, c5 DOUBLE, c6 DOUBLE, c7 DOUBLE, c8 DOUBLE, c9 DOUBLE) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+ sum(substr(src.value,5)),
+ avg(substr(src.value,5)),
+ avg(DISTINCT substr(src.value,5)),
+ max(substr(src.value,5)),
+ min(substr(src.value,5)),
+ std(substr(src.value,5)),
+ stddev_samp(substr(src.value,5)),
+ variance(substr(src.value,5)),
+ var_samp(substr(src.value,5))
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+ sum(substr(src.value,5)),
+ avg(substr(src.value,5)),
+ avg(DISTINCT substr(src.value,5)),
+ max(substr(src.value,5)),
+ min(substr(src.value,5)),
+ std(substr(src.value,5)),
+ stddev_samp(substr(src.value,5)),
+ variance(substr(src.value,5)),
+ var_samp(substr(src.value,5))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: substr(value, 5) (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(KEY._col0:0._col0), avg(KEY._col0:0._col0), avg(DISTINCT KEY._col0:0._col0), max(KEY._col0:0._col0), min(KEY._col0:0._col0), std(KEY._col0:0._col0), stddev_samp(KEY._col0:0._col0), variance(KEY._col0:0._col0), var_samp(KEY._col0:0._col0)
+ mode: partial1
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 1 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double), _col1 (type: struct<count:bigint,sum:double,input:string>), _col2 (type: struct<count:bigint,sum:double,input:string>), _col3 (type: string), _col4 (type: string), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2), max(VALUE._col3), min(VALUE._col4), std(VALUE._col5), stddev_samp(VALUE._col6), variance(VALUE._col7), var_samp(VALUE._col8)
+ mode: final
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+ sum(substr(src.value,5)),
+ avg(substr(src.value,5)),
+ avg(DISTINCT substr(src.value,5)),
+ max(substr(src.value,5)),
+ min(substr(src.value,5)),
+ std(substr(src.value,5)),
+ stddev_samp(substr(src.value,5)),
+ variance(substr(src.value,5)),
+ var_samp(substr(src.value,5))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1 SELECT
+ sum(substr(src.value,5)),
+ avg(substr(src.value,5)),
+ avg(DISTINCT substr(src.value,5)),
+ max(substr(src.value,5)),
+ min(substr(src.value,5)),
+ std(substr(src.value,5)),
+ stddev_samp(substr(src.value,5)),
+ variance(substr(src.value,5)),
+ var_samp(substr(src.value,5))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c7 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c9 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1.* FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+130091.0 260.182 256.10355987055016 98.0 0.0 142.92680950752379 143.06995106518903 20428.07287599999 20469.010897795582
[43/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out
new file mode 100644
index 0000000..4d6ed75
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out
@@ -0,0 +1,1030 @@
+PREHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [a]
+ /bucket_small/ds=2008-04-09 [a]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 1 => 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 0 Map 1
+ Position of Big Table: 1
+ Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [b]
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+38
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ /bucket_small/ds=2008-04-09 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+38
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ /bucket_small/ds=2008-04-09 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+38
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out
new file mode 100644
index 0000000..b463b43
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out
@@ -0,0 +1,780 @@
+PREHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket
+
+-- SORT_QUERY_RESULTS
+
+CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket
+
+-- SORT_QUERY_RESULTS
+
+CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small [a]
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ Position of Big Table: 1
+ Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big [b]
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_small
+#### A masked pattern was here ####
+19
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small [b]
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ Position of Big Table: 0
+ Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big [a]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_small
+#### A masked pattern was here ####
+19
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: bucket_big
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: bucket_small
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_small
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_small
+#### A masked pattern was here ####
+19
[49/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_join29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_join29.q.out b/ql/src/test/results/clientpositive/llap/auto_join29.q.out
new file mode 100644
index 0000000..13b3ec4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_join29.q.out
@@ -0,0 +1,3556 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key > 10) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: src3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ Right Outer Join1 to 2
+ filter predicates:
+ 0 {(KEY.reducesinkkey0 < 10)}
+ 1
+ 2 {(KEY.reducesinkkey0 < 10)}
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+ sort order: ++++++
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+NULL NULL NULL NULL 0 val_0
+NULL NULL NULL NULL 0 val_0
+NULL NULL NULL NULL 0 val_0
+NULL NULL NULL NULL 10 val_10
+NULL NULL NULL NULL 100 val_100
+NULL NULL NULL NULL 100 val_100
+NULL NULL NULL NULL 103 val_103
+NULL NULL NULL NULL 103 val_103
+NULL NULL NULL NULL 104 val_104
+NULL NULL NULL NULL 104 val_104
+NULL NULL NULL NULL 105 val_105
+NULL NULL NULL NULL 11 val_11
+NULL NULL NULL NULL 111 val_111
+NULL NULL NULL NULL 113 val_113
+NULL NULL NULL NULL 113 val_113
+NULL NULL NULL NULL 114 val_114
+NULL NULL NULL NULL 116 val_116
+NULL NULL NULL NULL 118 val_118
+NULL NULL NULL NULL 118 val_118
+NULL NULL NULL NULL 119 val_119
+NULL NULL NULL NULL 119 val_119
+NULL NULL NULL NULL 119 val_119
+NULL NULL NULL NULL 12 val_12
+NULL NULL NULL NULL 12 val_12
+NULL NULL NULL NULL 120 val_120
+NULL NULL NULL NULL 120 val_120
+NULL NULL NULL NULL 125 val_125
+NULL NULL NULL NULL 125 val_125
+NULL NULL NULL NULL 126 val_126
+NULL NULL NULL NULL 128 val_128
+NULL NULL NULL NULL 128 val_128
+NULL NULL NULL NULL 128 val_128
+NULL NULL NULL NULL 129 val_129
+NULL NULL NULL NULL 129 val_129
+NULL NULL NULL NULL 131 val_131
+NULL NULL NULL NULL 133 val_133
+NULL NULL NULL NULL 134 val_134
+NULL NULL NULL NULL 134 val_134
+NULL NULL NULL NULL 136 val_136
+NULL NULL NULL NULL 137 val_137
+NULL NULL NULL NULL 137 val_137
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 143 val_143
+NULL NULL NULL NULL 145 val_145
+NULL NULL NULL NULL 146 val_146
+NULL NULL NULL NULL 146 val_146
+NULL NULL NULL NULL 149 val_149
+NULL NULL NULL NULL 149 val_149
+NULL NULL NULL NULL 15 val_15
+NULL NULL NULL NULL 15 val_15
+NULL NULL NULL NULL 150 val_150
+NULL NULL NULL NULL 152 val_152
+NULL NULL NULL NULL 152 val_152
+NULL NULL NULL NULL 153 val_153
+NULL NULL NULL NULL 155 val_155
+NULL NULL NULL NULL 156 val_156
+NULL NULL NULL NULL 157 val_157
+NULL NULL NULL NULL 158 val_158
+NULL NULL NULL NULL 160 val_160
+NULL NULL NULL NULL 162 val_162
+NULL NULL NULL NULL 163 val_163
+NULL NULL NULL NULL 164 val_164
+NULL NULL NULL NULL 164 val_164
+NULL NULL NULL NULL 165 val_165
+NULL NULL NULL NULL 165 val_165
+NULL NULL NULL NULL 166 val_166
+NULL NULL NULL NULL 167 val_167
+NULL NULL NULL NULL 167 val_167
+NULL NULL NULL NULL 167 val_167
+NULL NULL NULL NULL 168 val_168
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 17 val_17
+NULL NULL NULL NULL 170 val_170
+NULL NULL NULL NULL 172 val_172
+NULL NULL NULL NULL 172 val_172
+NULL NULL NULL NULL 174 val_174
+NULL NULL NULL NULL 174 val_174
+NULL NULL NULL NULL 175 val_175
+NULL NULL NULL NULL 175 val_175
+NULL NULL NULL NULL 176 val_176
+NULL NULL NULL NULL 176 val_176
+NULL NULL NULL NULL 177 val_177
+NULL NULL NULL NULL 178 val_178
+NULL NULL NULL NULL 179 val_179
+NULL NULL NULL NULL 179 val_179
+NULL NULL NULL NULL 18 val_18
+NULL NULL NULL NULL 18 val_18
+NULL NULL NULL NULL 180 val_180
+NULL NULL NULL NULL 181 val_181
+NULL NULL NULL NULL 183 val_183
+NULL NULL NULL NULL 186 val_186
+NULL NULL NULL NULL 187 val_187
+NULL NULL NULL NULL 187 val_187
+NULL NULL NULL NULL 187 val_187
+NULL NULL NULL NULL 189 val_189
+NULL NULL NULL NULL 19 val_19
+NULL NULL NULL NULL 190 val_190
+NULL NULL NULL NULL 191 val_191
+NULL NULL NULL NULL 191 val_191
+NULL NULL NULL NULL 192 val_192
+NULL NULL NULL NULL 193 val_193
+NULL NULL NULL NULL 193 val_193
+NULL NULL NULL NULL 193 val_193
+NULL NULL NULL NULL 194 val_194
+NULL NULL NULL NULL 195 val_195
+NULL NULL NULL NULL 195 val_195
+NULL NULL NULL NULL 196 val_196
+NULL NULL NULL NULL 197 val_197
+NULL NULL NULL NULL 197 val_197
+NULL NULL NULL NULL 199 val_199
+NULL NULL NULL NULL 199 val_199
+NULL NULL NULL NULL 199 val_199
+NULL NULL NULL NULL 2 val_2
+NULL NULL NULL NULL 20 val_20
+NULL NULL NULL NULL 200 val_200
+NULL NULL NULL NULL 200 val_200
+NULL NULL NULL NULL 201 val_201
+NULL NULL NULL NULL 202 val_202
+NULL NULL NULL NULL 203 val_203
+NULL NULL NULL NULL 203 val_203
+NULL NULL NULL NULL 205 val_205
+NULL NULL NULL NULL 205 val_205
+NULL NULL NULL NULL 207 val_207
+NULL NULL NULL NULL 207 val_207
+NULL NULL NULL NULL 208 val_208
+NULL NULL NULL NULL 208 val_208
+NULL NULL NULL NULL 208 val_208
+NULL NULL NULL NULL 209 val_209
+NULL NULL NULL NULL 209 val_209
+NULL NULL NULL NULL 213 val_213
+NULL NULL NULL NULL 213 val_213
+NULL NULL NULL NULL 214 val_214
+NULL NULL NULL NULL 216 val_216
+NULL NULL NULL NULL 216 val_216
+NULL NULL NULL NULL 217 val_217
+NULL NULL NULL NULL 217 val_217
+NULL NULL NULL NULL 218 val_218
+NULL NULL NULL NULL 219 val_219
+NULL NULL NULL NULL 219 val_219
+NULL NULL NULL NULL 221 val_221
+NULL NULL NULL NULL 221 val_221
+NULL NULL NULL NULL 222 val_222
+NULL NULL NULL NULL 223 val_223
+NULL NULL NULL NULL 223 val_223
+NULL NULL NULL NULL 224 val_224
+NULL NULL NULL NULL 224 val_224
+NULL NULL NULL NULL 226 val_226
+NULL NULL NULL NULL 228 val_228
+NULL NULL NULL NULL 229 val_229
+NULL NULL NULL NULL 229 val_229
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 233 val_233
+NULL NULL NULL NULL 233 val_233
+NULL NULL NULL NULL 235 val_235
+NULL NULL NULL NULL 237 val_237
+NULL NULL NULL NULL 237 val_237
+NULL NULL NULL NULL 238 val_238
+NULL NULL NULL NULL 238 val_238
+NULL NULL NULL NULL 239 val_239
+NULL NULL NULL NULL 239 val_239
+NULL NULL NULL NULL 24 val_24
+NULL NULL NULL NULL 24 val_24
+NULL NULL NULL NULL 241 val_241
+NULL NULL NULL NULL 242 val_242
+NULL NULL NULL NULL 242 val_242
+NULL NULL NULL NULL 244 val_244
+NULL NULL NULL NULL 247 val_247
+NULL NULL NULL NULL 248 val_248
+NULL NULL NULL NULL 249 val_249
+NULL NULL NULL NULL 252 val_252
+NULL NULL NULL NULL 255 val_255
+NULL NULL NULL NULL 255 val_255
+NULL NULL NULL NULL 256 val_256
+NULL NULL NULL NULL 256 val_256
+NULL NULL NULL NULL 257 val_257
+NULL NULL NULL NULL 258 val_258
+NULL NULL NULL NULL 26 val_26
+NULL NULL NULL NULL 26 val_26
+NULL NULL NULL NULL 260 val_260
+NULL NULL NULL NULL 262 val_262
+NULL NULL NULL NULL 263 val_263
+NULL NULL NULL NULL 265 val_265
+NULL NULL NULL NULL 265 val_265
+NULL NULL NULL NULL 266 val_266
+NULL NULL NULL NULL 27 val_27
+NULL NULL NULL NULL 272 val_272
+NULL NULL NULL NULL 272 val_272
+NULL NULL NULL NULL 273 val_273
+NULL NULL NULL NULL 273 val_273
+NULL NULL NULL NULL 273 val_273
+NULL NULL NULL NULL 274 val_274
+NULL NULL NULL NULL 275 val_275
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 278 val_278
+NULL NULL NULL NULL 278 val_278
+NULL NULL NULL NULL 28 val_28
+NULL NULL NULL NULL 280 val_280
+NULL NULL NULL NULL 280 val_280
+NULL NULL NULL NULL 281 val_281
+NULL NULL NULL NULL 281 val_281
+NULL NULL NULL NULL 282 val_282
+NULL NULL NULL NULL 282 val_282
+NULL NULL NULL NULL 283 val_283
+NULL NULL NULL NULL 284 val_284
+NULL NULL NULL NULL 285 val_285
+NULL NULL NULL NULL 286 val_286
+NULL NULL NULL NULL 287 val_287
+NULL NULL NULL NULL 288 val_288
+NULL NULL NULL NULL 288 val_288
+NULL NULL NULL NULL 289 val_289
+NULL NULL NULL NULL 291 val_291
+NULL NULL NULL NULL 292 val_292
+NULL NULL NULL NULL 296 val_296
+NULL NULL NULL NULL 298 val_298
+NULL NULL NULL NULL 298 val_298
+NULL NULL NULL NULL 298 val_298
+NULL NULL NULL NULL 30 val_30
+NULL NULL NULL NULL 302 val_302
+NULL NULL NULL NULL 305 val_305
+NULL NULL NULL NULL 306 val_306
+NULL NULL NULL NULL 307 val_307
+NULL NULL NULL NULL 307 val_307
+NULL NULL NULL NULL 308 val_308
+NULL NULL NULL NULL 309 val_309
+NULL NULL NULL NULL 309 val_309
+NULL NULL NULL NULL 310 val_310
+NULL NULL NULL NULL 311 val_311
+NULL NULL NULL NULL 311 val_311
+NULL NULL NULL NULL 311 val_311
+NULL NULL NULL NULL 315 val_315
+NULL NULL NULL NULL 316 val_316
+NULL NULL NULL NULL 316 val_316
+NULL NULL NULL NULL 316 val_316
+NULL NULL NULL NULL 317 val_317
+NULL NULL NULL NULL 317 val_317
+NULL NULL NULL NULL 318 val_318
+NULL NULL NULL NULL 318 val_318
+NULL NULL NULL NULL 318 val_318
+NULL NULL NULL NULL 321 val_321
+NULL NULL NULL NULL 321 val_321
+NULL NULL NULL NULL 322 val_322
+NULL NULL NULL NULL 322 val_322
+NULL NULL NULL NULL 323 val_323
+NULL NULL NULL NULL 325 val_325
+NULL NULL NULL NULL 325 val_325
+NULL NULL NULL NULL 327 val_327
+NULL NULL NULL NULL 327 val_327
+NULL NULL NULL NULL 327 val_327
+NULL NULL NULL NULL 33 val_33
+NULL NULL NULL NULL 331 val_331
+NULL NULL NULL NULL 331 val_331
+NULL NULL NULL NULL 332 val_332
+NULL NULL NULL NULL 333 val_333
+NULL NULL NULL NULL 333 val_333
+NULL NULL NULL NULL 335 val_335
+NULL NULL NULL NULL 336 val_336
+NULL NULL NULL NULL 338 val_338
+NULL NULL NULL NULL 339 val_339
+NULL NULL NULL NULL 34 val_34
+NULL NULL NULL NULL 341 val_341
+NULL NULL NULL NULL 342 val_342
+NULL NULL NULL NULL 342 val_342
+NULL NULL NULL NULL 344 val_344
+NULL NULL NULL NULL 344 val_344
+NULL NULL NULL NULL 345 val_345
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 35 val_35
+NULL NULL NULL NULL 35 val_35
+NULL NULL NULL NULL 35 val_35
+NULL NULL NULL NULL 351 val_351
+NULL NULL NULL NULL 353 val_353
+NULL NULL NULL NULL 353 val_353
+NULL NULL NULL NULL 356 val_356
+NULL NULL NULL NULL 360 val_360
+NULL NULL NULL NULL 362 val_362
+NULL NULL NULL NULL 364 val_364
+NULL NULL NULL NULL 365 val_365
+NULL NULL NULL NULL 366 val_366
+NULL NULL NULL NULL 367 val_367
+NULL NULL NULL NULL 367 val_367
+NULL NULL NULL NULL 368 val_368
+NULL NULL NULL NULL 369 val_369
+NULL NULL NULL NULL 369 val_369
+NULL NULL NULL NULL 369 val_369
+NULL NULL NULL NULL 37 val_37
+NULL NULL NULL NULL 37 val_37
+NULL NULL NULL NULL 373 val_373
+NULL NULL NULL NULL 374 val_374
+NULL NULL NULL NULL 375 val_375
+NULL NULL NULL NULL 377 val_377
+NULL NULL NULL NULL 378 val_378
+NULL NULL NULL NULL 379 val_379
+NULL NULL NULL NULL 382 val_382
+NULL NULL NULL NULL 382 val_382
+NULL NULL NULL NULL 384 val_384
+NULL NULL NULL NULL 384 val_384
+NULL NULL NULL NULL 384 val_384
+NULL NULL NULL NULL 386 val_386
+NULL NULL NULL NULL 389 val_389
+NULL NULL NULL NULL 392 val_392
+NULL NULL NULL NULL 393 val_393
+NULL NULL NULL NULL 394 val_394
+NULL NULL NULL NULL 395 val_395
+NULL NULL NULL NULL 395 val_395
+NULL NULL NULL NULL 396 val_396
+NULL NULL NULL NULL 396 val_396
+NULL NULL NULL NULL 396 val_396
+NULL NULL NULL NULL 397 val_397
+NULL NULL NULL NULL 397 val_397
+NULL NULL NULL NULL 399 val_399
+NULL NULL NULL NULL 399 val_399
+NULL NULL NULL NULL 4 val_4
+NULL NULL NULL NULL 400 val_400
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 402 val_402
+NULL NULL NULL NULL 403 val_403
+NULL NULL NULL NULL 403 val_403
+NULL NULL NULL NULL 403 val_403
+NULL NULL NULL NULL 404 val_404
+NULL NULL NULL NULL 404 val_404
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 407 val_407
+NULL NULL NULL NULL 409 val_409
+NULL NULL NULL NULL 409 val_409
+NULL NULL NULL NULL 409 val_409
+NULL NULL NULL NULL 41 val_41
+NULL NULL NULL NULL 411 val_411
+NULL NULL NULL NULL 413 val_413
+NULL NULL NULL NULL 413 val_413
+NULL NULL NULL NULL 414 val_414
+NULL NULL NULL NULL 414 val_414
+NULL NULL NULL NULL 417 val_417
+NULL NULL NULL NULL 417 val_417
+NULL NULL NULL NULL 417 val_417
+NULL NULL NULL NULL 418 val_418
+NULL NULL NULL NULL 419 val_419
+NULL NULL NULL NULL 42 val_42
+NULL NULL NULL NULL 42 val_42
+NULL NULL NULL NULL 421 val_421
+NULL NULL NULL NULL 424 val_424
+NULL NULL NULL NULL 424 val_424
+NULL NULL NULL NULL 427 val_427
+NULL NULL NULL NULL 429 val_429
+NULL NULL NULL NULL 429 val_429
+NULL NULL NULL NULL 43 val_43
+NULL NULL NULL NULL 430 val_430
+NULL NULL NULL NULL 430 val_430
+NULL NULL NULL NULL 430 val_430
+NULL NULL NULL NULL 431 val_431
+NULL NULL NULL NULL 431 val_431
+NULL NULL NULL NULL 431 val_431
+NULL NULL NULL NULL 432 val_432
+NULL NULL NULL NULL 435 val_435
+NULL NULL NULL NULL 436 val_436
+NULL NULL NULL NULL 437 val_437
+NULL NULL NULL NULL 438 val_438
+NULL NULL NULL NULL 438 val_438
+NULL NULL NULL NULL 438 val_438
+NULL NULL NULL NULL 439 val_439
+NULL NULL NULL NULL 439 val_439
+NULL NULL NULL NULL 44 val_44
+NULL NULL NULL NULL 443 val_443
+NULL NULL NULL NULL 444 val_444
+NULL NULL NULL NULL 446 val_446
+NULL NULL NULL NULL 448 val_448
+NULL NULL NULL NULL 449 val_449
+NULL NULL NULL NULL 452 val_452
+NULL NULL NULL NULL 453 val_453
+NULL NULL NULL NULL 454 val_454
+NULL NULL NULL NULL 454 val_454
+NULL NULL NULL NULL 454 val_454
+NULL NULL NULL NULL 455 val_455
+NULL NULL NULL NULL 457 val_457
+NULL NULL NULL NULL 458 val_458
+NULL NULL NULL NULL 458 val_458
+NULL NULL NULL NULL 459 val_459
+NULL NULL NULL NULL 459 val_459
+NULL NULL NULL NULL 460 val_460
+NULL NULL NULL NULL 462 val_462
+NULL NULL NULL NULL 462 val_462
+NULL NULL NULL NULL 463 val_463
+NULL NULL NULL NULL 463 val_463
+NULL NULL NULL NULL 466 val_466
+NULL NULL NULL NULL 466 val_466
+NULL NULL NULL NULL 466 val_466
+NULL NULL NULL NULL 467 val_467
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 47 val_47
+NULL NULL NULL NULL 470 val_470
+NULL NULL NULL NULL 472 val_472
+NULL NULL NULL NULL 475 val_475
+NULL NULL NULL NULL 477 val_477
+NULL NULL NULL NULL 478 val_478
+NULL NULL NULL NULL 478 val_478
+NULL NULL NULL NULL 479 val_479
+NULL NULL NULL NULL 480 val_480
+NULL NULL NULL NULL 480 val_480
+NULL NULL NULL NULL 480 val_480
+NULL NULL NULL NULL 481 val_481
+NULL NULL NULL NULL 482 val_482
+NULL NULL NULL NULL 483 val_483
+NULL NULL NULL NULL 484 val_484
+NULL NULL NULL NULL 485 val_485
+NULL NULL NULL NULL 487 val_487
+NULL NULL NULL NULL 489 val_489
+NULL NULL NULL NULL 489 val_489
+NULL NULL NULL NULL 489 val_489
+NULL NULL NULL NULL 489 val_489
+NULL NULL NULL NULL 490 val_490
+NULL NULL NULL NULL 491 val_491
+NULL NULL NULL NULL 492 val_492
+NULL NULL NULL NULL 492 val_492
+NULL NULL NULL NULL 493 val_493
+NULL NULL NULL NULL 494 val_494
+NULL NULL NULL NULL 495 val_495
+NULL NULL NULL NULL 496 val_496
+NULL NULL NULL NULL 497 val_497
+NULL NULL NULL NULL 498 val_498
+NULL NULL NULL NULL 498 val_498
+NULL NULL NULL NULL 498 val_498
+NULL NULL NULL NULL 5 val_5
+NULL NULL NULL NULL 5 val_5
+NULL NULL NULL NULL 5 val_5
+NULL NULL NULL NULL 51 val_51
+NULL NULL NULL NULL 51 val_51
+NULL NULL NULL NULL 53 val_53
+NULL NULL NULL NULL 54 val_54
+NULL NULL NULL NULL 57 val_57
+NULL NULL NULL NULL 58 val_58
+NULL NULL NULL NULL 58 val_58
+NULL NULL NULL NULL 64 val_64
+NULL NULL NULL NULL 65 val_65
+NULL NULL NULL NULL 66 val_66
+NULL NULL NULL NULL 67 val_67
+NULL NULL NULL NULL 67 val_67
+NULL NULL NULL NULL 69 val_69
+NULL NULL NULL NULL 70 val_70
+NULL NULL NULL NULL 70 val_70
+NULL NULL NULL NULL 70 val_70
+NULL NULL NULL NULL 72 val_72
+NULL NULL NULL NULL 72 val_72
+NULL NULL NULL NULL 74 val_74
+NULL NULL NULL NULL 76 val_76
+NULL NULL NULL NULL 76 val_76
+NULL NULL NULL NULL 77 val_77
+NULL NULL NULL NULL 78 val_78
+NULL NULL NULL NULL 8 val_8
+NULL NULL NULL NULL 80 val_80
+NULL NULL NULL NULL 82 val_82
+NULL NULL NULL NULL 83 val_83
+NULL NULL NULL NULL 83 val_83
+NULL NULL NULL NULL 84 val_84
+NULL NULL NULL NULL 84 val_84
+NULL NULL NULL NULL 85 val_85
+NULL NULL NULL NULL 86 val_86
+NULL NULL NULL NULL 87 val_87
+NULL NULL NULL NULL 9 val_9
+NULL NULL NULL NULL 90 val_90
+NULL NULL NULL NULL 90 val_90
+NULL NULL NULL NULL 90 val_90
+NULL NULL NULL NULL 92 val_92
+NULL NULL NULL NULL 95 val_95
+NULL NULL NULL NULL 95 val_95
+NULL NULL NULL NULL 96 val_96
+NULL NULL NULL NULL 97 val_97
+NULL NULL NULL NULL 97 val_97
+NULL NULL NULL NULL 98 val_98
+NULL NULL NULL NULL 98 val_98
+PREHOOK: query: explain
+SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key > 10) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: src3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((key > 10) and (key < 10)) (type: boolean)
+ Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ Left Outer Join1 to 2
+ filter predicates:
+ 0 {(KEY.reducesinkkey0 < 10)}
+ 1
+ 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+ sort order: ++++++
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0 val_0 NULL NULL NULL NULL
+0 val_0 NULL NULL NULL NULL
+0 val_0 NULL NULL NULL NULL
+10 val_10 NULL NULL NULL NULL
+100 val_100 NULL NULL NULL NULL
+100 val_100 NULL NULL NULL NULL
+103 val_103 NULL NULL NULL NULL
+103 val_103 NULL NULL NULL NULL
+104 val_104 NULL NULL NULL NULL
+104 val_104 NULL NULL NULL NULL
+105 val_105 NULL NULL NULL NULL
+11 val_11 NULL NULL NULL NULL
+111 val_111 NULL NULL NULL NULL
+113 val_113 NULL NULL NULL NULL
+113 val_113 NULL NULL NULL NULL
+114 val_114 NULL NULL NULL NULL
+116 val_116 NULL NULL NULL NULL
+118 val_118 NULL NULL NULL NULL
+118 val_118 NULL NULL NULL NULL
+119 val_119 NULL NULL NULL NULL
+119 val_119 NULL NULL NULL NULL
+119 val_119 NULL NULL NULL NULL
+12 val_12 NULL NULL NULL NULL
+12 val_12 NULL NULL NULL NULL
+120 val_120 NULL NULL NULL NULL
+120 val_120 NULL NULL NULL NULL
+125 val_125 NULL NULL NULL NULL
+125 val_125 NULL NULL NULL NULL
+126 val_126 NULL NULL NULL NULL
+128 val_128 NULL NULL NULL NULL
+128 val_128 NULL NULL NULL NULL
+128 val_128 NULL NULL NULL NULL
+129 val_129 NULL NULL NULL NULL
+129 val_129 NULL NULL NULL NULL
+131 val_131 NULL NULL NULL NULL
+133 val_133 NULL NULL NULL NULL
+134 val_134 NULL NULL NULL NULL
+134 val_134 NULL NULL NULL NULL
+136 val_136 NULL NULL NULL NULL
+137 val_137 NULL NULL NULL NULL
+137 val_137 NULL NULL NULL NULL
+138 val_138 NULL NULL NULL NULL
+138 val_138 NULL NULL NULL NULL
+138 val_138 NULL NULL NULL NULL
+138 val_138 NULL NULL NULL NULL
+143 val_143 NULL NULL NULL NULL
+145 val_145 NULL NULL NULL NULL
+146 val_146 NULL NULL NULL NULL
+146 val_146 NULL NULL NULL NULL
+149 val_149 NULL NULL NULL NULL
+149 val_149 NULL NULL NULL NULL
+15 val_15 NULL NULL NULL NULL
+15 val_15 NULL NULL NULL NULL
+150 val_150 NULL NULL NULL NULL
+152 val_152 NULL NULL NULL NULL
+152 val_152 NULL NULL NULL NULL
+153 val_153 NULL NULL NULL NULL
+155 val_155 NULL NULL NULL NULL
+156 val_156 NULL NULL NULL NULL
+157 val_157 NULL NULL NULL NULL
+158 val_158 NULL NULL NULL NULL
+160 val_160 NULL NULL NULL NULL
+162 val_162 NULL NULL NULL NULL
+163 val_163 NULL NULL NULL NULL
+164 val_164 NULL NULL NULL NULL
+164 val_164 NULL NULL NULL NULL
+165 val_165 NULL NULL NULL NULL
+165 val_165 NULL NULL NULL NULL
+166 val_166 NULL NULL NULL NULL
+167 val_167 NULL NULL NULL NULL
+167 val_167 NULL NULL NULL NULL
+167 val_167 NULL NULL NULL NULL
+168 val_168 NULL NULL NULL NULL
+169 val_169 NULL NULL NULL NULL
+169 val_169 NULL NULL NULL NULL
+169 val_169 NULL NULL NULL NULL
+169 val_169 NULL NULL NULL NULL
+17 val_17 NULL NULL NULL NULL
+170 val_170 NULL NULL NULL NULL
+172 val_172 NULL NULL NULL NULL
+172 val_172 NULL NULL NULL NULL
+174 val_174 NULL NULL NULL NULL
+174 val_174 NULL NULL NULL NULL
+175 val_175 NULL NULL NULL NULL
+175 val_175 NULL NULL NULL NULL
+176 val_176 NULL NULL NULL NULL
+176 val_176 NULL NULL NULL NULL
+177 val_177 NULL NULL NULL NULL
+178 val_178 NULL NULL NULL NULL
+179 val_179 NULL NULL NULL NULL
+179 val_179 NULL NULL NULL NULL
+18 val_18 NULL NULL NULL NULL
+18 val_18 NULL NULL NULL NULL
+180 val_180 NULL NULL NULL NULL
+181 val_181 NULL NULL NULL NULL
+183 val_183 NULL NULL NULL NULL
+186 val_186 NULL NULL NULL NULL
+187 val_187 NULL NULL NULL NULL
+187 val_187 NULL NULL NULL NULL
+187 val_187 NULL NULL NULL NULL
+189 val_189 NULL NULL NULL NULL
+19 val_19 NULL NULL NULL NULL
+190 val_190 NULL NULL NULL NULL
+191 val_191 NULL NULL NULL NULL
+191 val_191 NULL NULL NULL NULL
+192 val_192 NULL NULL NULL NULL
+193 val_193 NULL NULL NULL NULL
+193 val_193 NULL NULL NULL NULL
+193 val_193 NULL NULL NULL NULL
+194 val_194 NULL NULL NULL NULL
+195 val_195 NULL NULL NULL NULL
+195 val_195 NULL NULL NULL NULL
+196 val_196 NULL NULL NULL NULL
+197 val_197 NULL NULL NULL NULL
+197 val_197 NULL NULL NULL NULL
+199 val_199 NULL NULL NULL NULL
+199 val_199 NULL NULL NULL NULL
+199 val_199 NULL NULL NULL NULL
+2 val_2 NULL NULL NULL NULL
+20 val_20 NULL NULL NULL NULL
+200 val_200 NULL NULL NULL NULL
+200 val_200 NULL NULL NULL NULL
+201 val_201 NULL NULL NULL NULL
+202 val_202 NULL NULL NULL NULL
+203 val_203 NULL NULL NULL NULL
+203 val_203 NULL NULL NULL NULL
+205 val_205 NULL NULL NULL NULL
+205 val_205 NULL NULL NULL NULL
+207 val_207 NULL NULL NULL NULL
+207 val_207 NULL NULL NULL NULL
+208 val_208 NULL NULL NULL NULL
+208 val_208 NULL NULL NULL NULL
+208 val_208 NULL NULL NULL NULL
+209 val_209 NULL NULL NULL NULL
+209 val_209 NULL NULL NULL NULL
+213 val_213 NULL NULL NULL NULL
+213 val_213 NULL NULL NULL NULL
+214 val_214 NULL NULL NULL NULL
+216 val_216 NULL NULL NULL NULL
+216 val_216 NULL NULL NULL NULL
+217 val_217 NULL NULL NULL NULL
+217 val_217 NULL NULL NULL NULL
+218 val_218 NULL NULL NULL NULL
+219 val_219 NULL NULL NULL NULL
+219 val_219 NULL NULL NULL NULL
+221 val_221 NULL NULL NULL NULL
+221 val_221 NULL NULL NULL NULL
+222 val_222 NULL NULL NULL NULL
+223 val_223 NULL NULL NULL NULL
+223 val_223 NULL NULL NULL NULL
+224 val_224 NULL NULL NULL NULL
+224 val_224 NULL NULL NULL NULL
+226 val_226 NULL NULL NULL NULL
+228 val_228 NULL NULL NULL NULL
+229 val_229 NULL NULL NULL NULL
+229 val_229 NULL NULL NULL NULL
+230 val_230 NULL NULL NULL NULL
+230 val_230 NULL NULL NULL NULL
+230 val_230 NULL NULL NULL NULL
+230 val_230 NULL NULL NULL NULL
+230 val_230 NULL NULL NULL NULL
+233 val_233 NULL NULL NULL NULL
+233 val_233 NULL NULL NULL NULL
+235 val_235 NULL NULL NULL NULL
+237 val_237 NULL NULL NULL NULL
+237 val_237 NULL NULL NULL NULL
+238 val_238 NULL NULL NULL NULL
+238 val_238 NULL NULL NULL NULL
+239 val_239 NULL NULL NULL NULL
+239 val_239 NULL NULL NULL NULL
+24 val_24 NULL NULL NULL NULL
+24 val_24 NULL NULL NULL NULL
+241 val_241 NULL NULL NULL NULL
+242 val_242 NULL NULL NULL NULL
+242 val_242 NULL NULL NULL NULL
+244 val_244 NULL NULL NULL NULL
+247 val_247 NULL NULL NULL NULL
+248 val_248 NULL NULL NULL NULL
+249 val_249 NULL NULL NULL NULL
+252 val_252 NULL NULL NULL NULL
+255 val_255 NULL NULL NULL NULL
+255 val_255 NULL NULL NULL NULL
+256 val_256 NULL NULL NULL NULL
+256 val_256 NULL NULL NULL NULL
+257 val_257 NULL NULL NULL NULL
+258 val_258 NULL NULL NULL NULL
+26 val_26 NULL NULL NULL NULL
+26 val_26 NULL NULL NULL NULL
+260 val_260 NULL NULL NULL NULL
+262 val_262 NULL NULL NULL NULL
+263 val_263 NULL NULL NULL NULL
+265 val_265 NULL NULL NULL NULL
+265 val_265 NULL NULL NULL NULL
+266 val_266 NULL NULL NULL NULL
+27 val_27 NULL NULL NULL NULL
+272 val_272 NULL NULL NULL NULL
+272 val_272 NULL NULL NULL NULL
+273 val_273 NULL NULL NULL NULL
+273 val_273 NULL NULL NULL NULL
+273 val_273 NULL NULL NULL NULL
+274 val_274 NULL NULL NULL NULL
+275 val_275 NULL NULL NULL NULL
+277 val_277 NULL NULL NULL NULL
+277 val_277 NULL NULL NULL NULL
+277 val_277 NULL NULL NULL NULL
+277 val_277 NULL NULL NULL NULL
+278 val_278 NULL NULL NULL NULL
+278 val_278 NULL NULL NULL NULL
+28 val_28 NULL NULL NULL NULL
+280 val_280 NULL NULL NULL NULL
+280 val_280 NULL NULL NULL NULL
+281 val_281 NULL NULL NULL NULL
+281 val_281 NULL NULL NULL NULL
+282 val_282 NULL NULL NULL NULL
+282 val_282 NULL NULL NULL NULL
+283 val_283 NULL NULL NULL NULL
+284 val_284 NULL NULL NULL NULL
+285 val_285 NULL NULL NULL NULL
+286 val_286 NULL NULL NULL NULL
+287 val_287 NULL NULL NULL NULL
+288 val_288 NULL NULL NULL NULL
+288 val_288 NULL NULL NULL NULL
+289 val_289 NULL NULL NULL NULL
+291 val_291 NULL NULL NULL NULL
+292 val_292 NULL NULL NULL NULL
+296 val_296 NULL NULL NULL NULL
+298 val_298 NULL NULL NULL NULL
+298 val_298 NULL NULL NULL NULL
+298 val_298 NULL NULL NULL NULL
+30 val_30 NULL NULL NULL NULL
+302 val_302 NULL NULL NULL NULL
+305 val_305 NULL NULL NULL NULL
+306 val_306 NULL NULL NULL NULL
+307 val_307 NULL NULL NULL NULL
+307 val_307 NULL NULL NULL NULL
+308 val_308 NULL NULL NULL NULL
+309 val_309 NULL NULL NULL NULL
+309 val_309 NULL NULL NULL NULL
+310 val_310 NULL NULL NULL NULL
+311 val_311 NULL NULL NULL NULL
+311 val_311 NULL NULL NULL NULL
+311 val_311 NULL NULL NULL NULL
+315 val_315 NULL NULL NULL NULL
+316 val_316 NULL NULL NULL NULL
+316 val_316 NULL NULL NULL NULL
+316 val_316 NULL NULL NULL NULL
+317 val_317 NULL NULL NULL NULL
+317 val_317 NULL NULL NULL NULL
+318 val_318 NULL NULL NULL NULL
+318 val_318 NULL NULL NULL NULL
+318 val_318 NULL NULL NULL NULL
+321 val_321 NULL NULL NULL NULL
+321 val_321 NULL NULL NULL NULL
+322 val_322 NULL NULL NULL NULL
+322 val_322 NULL NULL NULL NULL
+323 val_323 NULL NULL NULL NULL
+325 val_325 NULL NULL NULL NULL
+325 val_325 NULL NULL NULL NULL
+327 val_327 NULL NULL NULL NULL
+327 val_327 NULL NULL NULL NULL
+327 val_327 NULL NULL NULL NULL
+33 val_33 NULL NULL NULL NULL
+331 val_331 NULL NULL NULL NULL
+331 val_331 NULL NULL NULL NULL
+332 val_332 NULL NULL NULL NULL
+333 val_333 NULL NULL NULL NULL
+333 val_333 NULL NULL NULL NULL
+335 val_335 NULL NULL NULL NULL
+336 val_336 NULL NULL NULL NULL
+338 val_338 NULL NULL NULL NULL
+339 val_339 NULL NULL NULL NULL
+34 val_34 NULL NULL NULL NULL
+341 val_341 NULL NULL NULL NULL
+342 val_342 NULL NULL NULL NULL
+342 val_342 NULL NULL NULL NULL
+344 val_344 NULL NULL NULL NULL
+344 val_344 NULL NULL NULL NULL
+345 val_345 NULL NULL NULL NULL
+348 val_348 NULL NULL NULL NULL
+348 val_348 NULL NULL NULL NULL
+348 val_348 NULL NULL NULL NULL
+348 val_348 NULL NULL NULL NULL
+348 val_348 NULL NULL NULL NULL
+35 val_35 NULL NULL NULL NULL
+35 val_35 NULL NULL NULL NULL
+35 val_35 NULL NULL NULL NULL
+351 val_351 NULL NULL NULL NULL
+353 val_353 NULL NULL NULL NULL
+353 val_353 NULL NULL NULL NULL
+356 val_356 NULL NULL NULL NULL
+360 val_360 NULL NULL NULL NULL
+362 val_362 NULL NULL NULL NULL
+364 val_364 NULL NULL NULL NULL
+365 val_365 NULL NULL NULL NULL
+366 val_366 NULL NULL NULL NULL
+367 val_367 NULL NULL NULL NULL
+367 val_367 NULL NULL NULL NULL
+368 val_368 NULL NULL NULL NULL
+369 val_369 NULL NULL NULL NULL
+369 val_369 NULL NULL NULL NULL
+369 val_369 NULL NULL NULL NULL
+37 val_37 NULL NULL NULL NULL
+37 val_37 NULL NULL NULL NULL
+373 val_373 NULL NULL NULL NULL
+374 val_374 NULL NULL NULL NULL
+375 val_375 NULL NULL NULL NULL
+377 val_377 NULL NULL NULL NULL
+378 val_378 NULL NULL NULL NULL
+379 val_379 NULL NULL NULL NULL
+382 val_382 NULL NULL NULL NULL
+382 val_382 NULL NULL NULL NULL
+384 val_384 NULL NULL NULL NULL
+384 val_384 NULL NULL NULL NULL
+384 val_384 NULL NULL NULL NULL
+386 val_386 NULL NULL NULL NULL
+389 val_389 NULL NULL NULL NULL
+392 val_392 NULL NULL NULL NULL
+393 val_393 NULL NULL NULL NULL
+394 val_394 NULL NULL NULL NULL
+395 val_395 NULL NULL NULL NULL
+395 val_395 NULL NULL NULL NULL
+396 val_396 NULL NULL NULL NULL
+396 val_396 NULL NULL NULL NULL
+396 val_396 NULL NULL NULL NULL
+397 val_397 NULL NULL NULL NULL
+397 val_397 NULL NULL NULL NULL
+399 val_399 NULL NULL NULL NULL
+399 val_399 NULL NULL NULL NULL
+4 val_4 NULL NULL NULL NULL
+400 val_400 NULL NULL NULL NULL
+401 val_401 NULL NULL NULL NULL
+401 val_401 NULL NULL NULL NULL
+401 val_401 NULL NULL NULL NULL
+401 val_401 NULL NULL NULL NULL
+401 val_401 NULL NULL NULL NULL
+402 val_402 NULL NULL NULL NULL
+403 val_403 NULL NULL NULL NULL
+403 val_403 NULL NULL NULL NULL
+403 val_403 NULL NULL NULL NULL
+404 val_404 NULL NULL NULL NULL
+404 val_404 NULL NULL NULL NULL
+406 val_406 NULL NULL NULL NULL
+406 val_406 NULL NULL NULL NULL
+406 val_406 NULL NULL NULL NULL
+406 val_406 NULL NULL NULL NULL
+407 val_407 NULL NULL NULL NULL
+409 val_409 NULL NULL NULL NULL
+409 val_409 NULL NULL NULL NULL
+409 val_409 NULL NULL NULL NULL
+41 val_41 NULL NULL NULL NULL
+411 val_411 NULL NULL NULL NULL
+413 val_413 NULL NULL NULL NULL
+413 val_413 NULL NULL NULL NULL
+414 val_414 NULL NULL NULL NULL
+414 val_414 NULL NULL NULL NULL
+417 val_417 NULL NULL NULL NULL
+417 val_417 NULL NULL NULL NULL
+417 val_417 NULL NULL NULL NULL
+418 val_418 NULL NULL NULL NULL
+419 val_419 NULL NULL NULL NULL
+42 val_42 NULL NULL NULL NULL
+42 val_42 NULL NULL NULL NULL
+421 val_421 NULL NULL NULL NULL
+424 val_424 NULL NULL NULL NULL
+424 val_424 NULL NULL NULL NULL
+427 val_427 NULL NULL NULL NULL
+429 val_429 NULL NULL NULL NULL
+429 val_429 NULL NULL NULL NULL
+43 val_43 NULL NULL NULL NULL
+430 val_430 NULL NULL NULL NULL
+430 val_430 NULL NULL NULL NULL
+430 val_430 NULL NULL NULL NULL
+431 val_431 NULL NULL NULL NULL
+431 val_431 NULL NULL NULL NULL
+431 val_431 NULL NULL NULL NULL
+432 val_432 NULL NULL NULL NULL
+435 val_435 NULL NULL NULL NULL
+436 val_436 NULL NULL NULL NULL
+437 val_437 NULL NULL NULL NULL
+438 val_438 NULL NULL NULL NULL
+438 val_438 NULL NULL NULL NULL
+438 val_438 NULL NULL NULL NULL
+439 val_439 NULL NULL NULL NULL
+439 val_439 NULL NULL NULL NULL
+44 val_44 NULL NULL NULL NULL
+443 val_443 NULL NULL NULL NULL
+444 val_444 NULL NULL NULL NULL
+446 val_446 NULL NULL NULL NULL
+448 val_448 NULL NULL NULL NULL
+449 val_449 NULL NULL NULL NULL
+452 val_452 NULL NULL NULL NULL
+453 val_453 NULL NULL NULL NULL
+454 val_454 NULL NULL NULL NULL
+454 val_454 NULL NULL NULL NULL
+454 val_454 NULL NULL NULL NULL
+455 val_455 NULL NULL NULL NULL
+457 val_457 NULL NULL NULL NULL
+458 val_458 NULL NULL NULL NULL
+458 val_458 NULL NULL NULL NULL
+459 val_459 NULL NULL NULL NULL
+459 val_459 NULL NULL NULL NULL
+460 val_460 NULL NULL NULL NULL
+462 val_462 NULL NULL NULL NULL
+462 val_462 NULL NULL NULL NULL
+463 val_463 NULL NULL NULL NULL
+463 val_463 NULL NULL NULL NULL
+466 val_466 NULL NULL NULL NULL
+466 val_466 NULL NULL NULL NULL
+466 val_466 NULL NULL NULL NULL
+467 val_467 NULL NULL NULL NULL
+468 val_468 NULL NULL NULL NULL
+468 val_468 NULL NULL NULL NULL
+468 val_468 NULL NULL NULL NULL
+468 val_468 NULL NULL NULL NULL
+469 val_469 NULL NULL NULL NULL
+469 val_469 NULL NULL NULL NULL
+469 val_469 NULL NULL NULL NULL
+469 val_469 NULL NULL NULL NULL
+469 val_469 NULL NULL NULL NULL
+47 val_47 NULL NULL NULL NULL
+470 val_470 NULL NULL NULL NULL
+472 val_472 NULL NULL NULL NULL
+475 val_475 NULL NULL NULL NULL
+477 val_477 NULL NULL NULL NULL
+478 val_478 NULL NULL NULL NULL
+478 val_478 NULL NULL NULL NULL
+479 val_479 NULL NULL NULL NULL
+480 val_480 NULL NULL NULL NULL
+480 val_480 NULL NULL NULL NULL
+480 val_480 NULL NULL NULL NULL
+481 val_481 NULL NULL NULL NULL
+482 val_482 NULL NULL NULL NULL
+483 val_483 NULL NULL NULL NULL
+484 val_484 NULL NULL NULL NULL
+485 val_485 NULL NULL NULL NULL
+487 val_487 NULL NULL NULL NULL
+489 val_489 NULL NULL NULL NULL
+489 val_489 NULL NULL NULL NULL
+489 val_489 NULL NULL NULL NULL
+489 val_489 NULL NULL NULL NULL
+490 val_490 NULL NULL NULL NULL
+491 val_491 NULL NULL NULL NULL
+492 val_492 NULL NULL NULL NULL
+492 val_492 NULL NULL NULL NULL
+493 val_493 NULL NULL NULL NULL
+494 val_494 NULL NULL NULL NULL
+495 val_495 NULL NULL NULL NULL
+496 val_496 NULL NULL NULL NULL
+497 val_497 NULL NULL NULL NULL
+498 val_498 NULL NULL NULL NULL
+498 val_498 NULL NULL NULL NULL
+498 val_498 NULL NULL NULL NULL
+5 val_5 NULL NULL NULL NULL
+5 val_5 NULL NULL NULL NULL
+5 val_5 NULL NULL NULL NULL
+51 val_51 NULL NULL NULL NULL
+51 val_51 NULL NULL NULL NULL
+53 val_53 NULL NULL NULL NULL
+54 val_54 NULL NULL NULL NULL
+57 val_57 NULL NULL NULL NULL
+58 val_58 NULL NULL NULL NULL
+58 val_58 NULL NULL NULL NULL
+64 val_64 NULL NULL NULL NULL
+65 val_65 NULL NULL NULL NULL
+66 val_66 NULL NULL NULL NULL
+67 val_67 NULL NULL NULL NULL
+67 val_67 NULL NULL NULL NULL
+69 val_69 NULL NULL NULL NULL
+70 val_70 NULL NULL NULL NULL
+70 val_70 NULL NULL NULL NULL
+70 val_70 NULL NULL NULL NULL
+72 val_72 NULL NULL NULL NULL
+72 val_72 NULL NULL NULL NULL
+74 val_74 NULL NULL NULL NULL
+76 val_76 NULL NULL NULL NULL
+76 val_76 NULL NULL NULL NULL
+77 val_77 NULL NULL NULL NULL
+78 val_78 NULL NULL NULL NULL
+8 val_8 NULL NULL NULL NULL
+80 val_80 NULL NULL NULL NULL
+82 val_82 NULL NULL NULL NULL
+83 val_83 NULL NULL NULL NULL
+83 val_83 NULL NULL NULL NULL
+84 val_84 NULL NULL NULL NULL
+84 val_84 NULL NULL NULL NULL
+85 val_85 NULL NULL NULL NULL
+86 val_86 NULL NULL NULL NULL
+87 val_87 NULL NULL NULL NULL
+9 val_9 NULL NULL NULL NULL
+90 val_90 NULL NULL NULL NULL
+90 val_90 NULL NULL NULL NULL
+90 val_90 NULL NULL NULL NULL
+92 val_92 NULL NULL NULL NULL
+95 val_95 NULL NULL NULL NULL
+95 val_95 NULL NULL NULL NULL
+96 val_96 NULL NULL NULL NULL
+97 val_97 NULL NULL NULL NULL
+97 val_97 NULL NULL NULL NULL
+98 val_98 NULL NULL NULL NULL
+98 val_98 NULL NULL NULL NULL
+PREHOOK: query: explain
+SELECT * FROM src src1 RIGHT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT * FROM src src1 RIGHT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 10) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: src3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 10) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ Left Outer Join1 to 2
+ filter predicates:
+ 0
+ 1 {(KEY.reducesinkkey0 > 10)}
+ 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+ sort order: ++++++
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT * FROM src src1 RIGHT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src src1 RIGHT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) LEFT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 10 val_10 NULL NULL
+NULL NULL 100 val_100 NULL NULL
+NULL NULL 100 val_100 NULL NULL
+NULL NULL 103 val_103 NULL NULL
+NULL NULL 103 val_103 NULL NULL
+NULL NULL 104 val_104 NULL NULL
+NULL NULL 104 val_104 NULL NULL
+NULL NULL 105 val_105 NULL NULL
+NULL NULL 11 val_11 NULL NULL
+NULL NULL 111 val_111 NULL NULL
+NULL NULL 113 val_113 NULL NULL
+NULL NULL 113 val_113 NULL NULL
+NULL NULL 114 val_114 NULL NULL
+NULL NULL 116 val_116 NULL NULL
+NULL NULL 118 val_118 NULL NULL
+NULL NULL 118 val_118 NULL NULL
+NULL NULL 119 val_119 NULL NULL
+NULL NULL 119 val_119 NULL NULL
+NULL NULL 119 val_119 NULL NULL
+NULL NULL 12 val_12 NULL NULL
+NULL NULL 12 val_12 NULL NULL
+NULL NULL 120 val_120 NULL NULL
+NULL NULL 120 val_120 NULL NULL
+NULL NULL 125 val_125 NULL NULL
+NULL NULL 125 val_125 NULL NULL
+NULL NULL 126 val_126 NULL NULL
+NULL NULL 128 val_128 NULL NULL
+NULL NULL 128 val_128 NULL NULL
+NULL NULL 128 val_128 NULL NULL
+NULL NULL 129 val_129 NULL NULL
+NULL NULL 129 val_129 NULL NULL
+NULL NULL 131 val_131 NULL NULL
+NULL NULL 133 val_133 NULL NULL
+NULL NULL 134 val_134 NULL NULL
+NULL NULL 134 val_134 NULL NULL
+NULL NULL 136 val_136 NULL NULL
+NULL NULL 137 val_137 NULL NULL
+NULL NULL 137 val_137 NULL NULL
+NULL NULL 138 val_138 NULL NULL
+NULL NULL 138 val_138 NULL NULL
+NULL NULL 138 val_138 NULL NULL
+NULL NULL 138 val_138 NULL NULL
+NULL NULL 143 val_143 NULL NULL
+NULL NULL 145 val_145 NULL NULL
+NULL NULL 146 val_146 NULL NULL
+NULL NULL 146 val_146 NULL NULL
+NULL NULL 149 val_149 NULL NULL
+NULL NULL 149 val_149 NULL NULL
+NULL NULL 15 val_15 NULL NULL
+NULL NULL 15 val_15 NULL NULL
+NULL NULL 150 val_150 NULL NULL
+NULL NULL 152 val_152 NULL NULL
+NULL NULL 152 val_152 NULL NULL
+NULL NULL 153 val_153 NULL NULL
+NULL NULL 155 val_155 NULL NULL
+NULL NULL 156 val_156 NULL NULL
+NULL NULL 157 val_157 NULL NULL
+NULL NULL 158 val_158 NULL NULL
+NULL NULL 160 val_160 NULL NULL
+NULL NULL 162 val_162 NULL NULL
+NULL NULL 163 val_163 NULL NULL
+NULL NULL 164 val_164 NULL NULL
+NULL NULL 164 val_164 NULL NULL
+NULL NULL 165 val_165 NULL NULL
+NULL NULL 165 val_165 NULL NULL
+NULL NULL 166 val_166 NULL NULL
+NULL NULL 167 val_167 NULL NULL
+NULL NULL 167 val_167 NULL NULL
+NULL NULL 167 val_167 NULL NULL
+NULL NULL 168 val_168 NULL NULL
+NULL NULL 169 val_169 NULL NULL
+NULL NULL 169 val_169 NULL NULL
+NULL NULL 169 val_169 NULL NULL
+NULL NULL 169 val_169 NULL NULL
+NULL NULL 17 val_17 NULL NULL
+NULL NULL 170 val_170 NULL NULL
+NULL NULL 172 val_172 NULL NULL
+NULL NULL 172 val_172 NULL NULL
+NULL NULL 174 val_174 NULL NULL
+NULL NULL 174 val_174 NULL NULL
+NULL NULL 175 val_175 NULL NULL
+NULL NULL 175 val_175 NULL NULL
+NULL NULL 176 val_176 NULL NULL
+NULL NULL 176 val_176 NULL NULL
+NULL NULL 177 val_177 NULL NULL
+NULL NULL 178 val_178 NULL NULL
+NULL NULL 179 val_179 NULL NULL
+NULL NULL 179 val_179 NULL NULL
+NULL NULL 18 val_18 NULL NULL
+NULL NULL 18 val_18 NULL NULL
+NULL NULL 180 val_180 NULL NULL
+NULL NULL 181 val_181 NULL NULL
+NULL NULL 183 val_183 NULL NULL
+NULL NULL 186 val_186 NULL NULL
+NULL NULL 187 val_187 NULL NULL
+NULL NULL 187 val_187 NULL NULL
+NULL NULL 187 val_187 NULL NULL
+NULL NULL 189 val_189 NULL NULL
+NULL NULL 19 val_19 NULL NULL
+NULL NULL 190 val_190 NULL NULL
+NULL NULL 191 val_191 NULL NULL
+NULL NULL 191 val_191 NULL NULL
+NULL NULL 192 val_192 NULL NULL
+NULL NULL 193 val_193 NULL NULL
+NULL NULL 193 val_193 NULL NULL
+NULL NULL 193 val_193 NULL NULL
+NULL NULL 194 val_194 NULL NULL
+NULL NULL 195 val_195 NULL NULL
+NULL NULL 195 val_195 NULL NULL
+NULL NULL 196 val_196 NULL NULL
+NULL NULL 197 val_197 NULL NULL
+NULL NULL 197 val_197 NULL NULL
+NULL NULL 199 val_199 NULL NULL
+NULL NULL 199 val_199 NULL NULL
+NULL NULL 199 val_199 NULL NULL
+NULL NULL 2 val_2 2 val_2
+NULL NULL 20 val_20 NULL NULL
+NULL NULL 200 val_200 NULL NULL
+NULL NULL 200 val_200 NULL NULL
+NULL NULL 201 val_201 NULL NULL
+NULL NULL 202 val_202 NULL NULL
+NULL NULL 203 val_203 NULL NULL
+NULL NULL 203 val_203 NULL NULL
+NULL NULL 205 val_205 NULL NULL
+NULL NULL 205 val_205 NULL NULL
+NULL NULL 207 val_207 NULL NULL
+NULL NULL 207 val_207 NULL NULL
+NULL NULL 208 val_208 NULL NULL
+NULL NULL 208 val_208 NULL NULL
+NULL NULL 208 val_208 NULL NULL
+NULL NULL 209 val_209 NULL NULL
+NULL NULL 209 val_209 NULL NULL
+NULL NULL 213 val_213 NULL NULL
+NULL NULL 213 val_213 NULL NULL
+NULL NULL 214 val_214 NULL NULL
+NULL NULL 216 val_216 NULL NULL
+NULL NULL 216 val_216 NULL NULL
+NULL NULL 217 val_217 NULL NULL
+NULL NULL 217 val_217 NULL NULL
+NULL NULL 218 val_218 NULL NULL
+NULL NULL 219 val_219 NULL NULL
+NULL NULL 219 val_219 NULL NULL
+NULL NULL 221 val_221 NULL NULL
+NULL NULL 221 val_221 NULL NULL
+NULL NULL 222 val_222 NULL NULL
+NULL NULL 223 val_223 NULL NULL
+NULL NULL 223 val_223 NULL NULL
+NULL NULL 224 val_224 NULL NULL
+NULL NULL 224 val_224 NULL NULL
+NULL NULL 226 val_226 NULL NULL
+NULL NULL 228 val_228 NULL NULL
+NULL NULL 229 val_229 NULL NULL
+NULL NULL 229 val_229 NULL NULL
+NULL NULL 230 val_230 NULL NULL
+NULL NULL 230 val_230 NULL NULL
+NULL NULL 230 val_230 NULL NULL
+NULL NULL 230 val_230 NULL NULL
+NULL NULL 230 val_230 NULL NULL
+NULL NULL 233 val_233 NULL NULL
+NULL NULL 233 val_233 NULL NULL
+NULL NULL 235 val_235 NULL NULL
+NULL NULL 237 val_237 NULL NULL
+NULL NULL 237 val_237 NULL NULL
+NULL NULL 238 val_238 NULL NULL
+NULL NULL 238 val_238 NULL NULL
+NULL NULL 239 val_239 NULL NULL
+NULL NULL 239 val_239 NULL NULL
+NULL NULL 24 val_24 NULL NULL
+NULL NULL 24 val_24 NULL NULL
+NULL NULL 241 val_241 NULL NULL
+NULL NULL 242 val_242 NULL NULL
+NULL NULL 242 val_242 NULL NULL
+NULL NULL 244 val_244 NULL NULL
+NULL NULL 247 val_247 NULL NULL
+NULL NULL 248 val_248 NULL NULL
+NULL NULL 249 val_249 NULL NULL
+NULL NULL 252 val_252 NULL NULL
+NULL NULL 255 val_255 NULL NULL
+NULL NULL 255 val_255 NULL NULL
+NULL NULL 256 val_256 NULL NULL
+NULL NULL 256 val_256 NULL NULL
+NULL NULL 257 val_257 NULL NULL
+NULL NULL 258 val_258 NULL NULL
+NULL NULL 26 val_26 NULL NULL
+NULL NULL 26 val_26 NULL NULL
+NULL NULL 260 val_260 NULL NULL
+NULL NULL 262 val_262 NULL NULL
+NULL NULL 263 val_263 NULL NULL
+NULL NULL 265 val_265 NULL NULL
+NULL NULL 265 val_265 NULL NULL
+NULL NULL 266 val_266 NULL NULL
+NULL NULL 27 val_27 NULL NULL
+NULL NULL 272 val_272 NULL NULL
+NULL NULL 272 val_272 NULL NULL
+NULL NULL 273 val_273 NULL NULL
+NULL NULL 273 val_273 NULL NULL
+NULL NULL 273 val_273 NULL NULL
+NULL NULL 274 val_274 NULL NULL
+NULL NULL 275 val_275 NULL NULL
+NULL NULL 277 val_277 NULL NULL
+NULL NULL 277 val_277 NULL NULL
+NULL NULL 277 val_277 NULL NULL
+NULL NULL 277 val_277 NULL NULL
+NULL NULL 278 val_278 NULL NULL
+NULL NULL 278 val_278 NULL NULL
+NULL NULL 28 val_28 NULL NULL
+NULL NULL 280 val_280 NULL NULL
+NULL NULL 280 val_280 NULL NULL
+NULL NULL 281 val_281 NULL NULL
+NULL NULL 281 val_281 NULL NULL
+NULL NULL 282 val_282 NULL NULL
+NULL NULL 282 val_282 NULL NULL
+NULL NULL 283 val_283 NULL NULL
+NULL NULL 284 val_284 NULL NULL
+NULL NULL 285 val_285 NULL NULL
+NULL NULL 286 val_286 NULL NULL
+NULL NULL 287 val_287 NULL NULL
+NULL NULL 288 val_288 NULL NULL
+NULL NULL 288 val_288 NULL NULL
+NULL NULL 289 val_289 NULL NULL
+NULL NULL 291 val_291 NULL NULL
+NULL NULL 292 val_292 NULL NULL
+NULL NULL 296 val_296 NULL NULL
+NULL NULL 298 val_298 NULL NULL
+NULL NULL 298 val_298 NULL NULL
+NULL NULL 298 val_298 NULL NULL
+NULL NULL 30 val_30 NULL NULL
+NULL NULL 302 val_302 NULL NULL
+NULL NULL 305 val_305 NULL NULL
+NULL NULL 306 val_306 NULL NULL
+NULL NULL 307 val_307 NULL NULL
+NULL NULL 307 val_307 NULL NULL
+NULL NULL 308 val_308 NULL NULL
+NULL NULL 309 val_309 NULL NULL
+NULL NULL 309 val_309 NULL NULL
+NULL NULL 310 val_310 NULL NULL
+NULL NULL 311 val_311 NULL NULL
+NULL NULL 311 val_311 NULL NULL
+NULL NULL 311 val_311 NULL NULL
+NULL NULL 315 val_315 NULL NULL
+NULL NULL 316 val_316 NULL NULL
+NULL NULL 316 val_316 NULL NULL
+NULL NULL 316 val_316 NULL NULL
+NULL NULL 317 val_317 NULL NULL
+NULL NULL 317 val_317 NULL NULL
+NULL NULL 318 val_318 NULL NULL
+NULL NULL 318 val_318 NULL NULL
+NULL NULL 318 val_318 NULL NULL
+NULL NULL 321 val_321 NULL NULL
+NULL NULL 321 val_321 NULL NULL
+NULL NULL 322 val_322 NULL NULL
+NULL NULL 322 val_322 NULL NULL
+NULL NULL 323 val_323 NULL NULL
+NULL NULL 325 val_325 NULL NULL
+NULL NULL 325 val_325 NULL NULL
+NULL NULL 327 val_327 NULL NULL
+NULL NULL 327 val_327 NULL NULL
+NULL NULL 327 val_327 NULL NULL
+NULL NULL 33 val_33 NULL NULL
+NULL NULL 331 val_331 NULL NULL
+NULL NULL 331 val_331 NULL NULL
+NULL NULL 332 val_332 NULL NULL
+NULL NULL 333 val_333 NULL NULL
+NULL NULL 333 val_333 NULL NULL
+NULL NULL 335 val_335 NULL NULL
+NULL NULL 336 val_336 NULL NULL
+NULL NULL 338 val_338 NULL NULL
+NULL NULL 339 val_339 NULL NULL
+NULL NULL 34 val_34 NULL NULL
+NULL NULL 341 val_341 NULL NULL
+NULL NULL 342 val_342 NULL NULL
+NULL NULL 342 val_342 NULL NULL
+NULL NULL 344 val_344 NULL NULL
+NULL NULL 344 val_344 NULL NULL
+NULL NULL 345 val_345 NULL NULL
+NULL NULL 348 val_348 NULL NULL
+NULL NULL 348 val_348 NULL NULL
+NULL NULL 348 val_348 NULL NULL
+NULL NULL 348 val_348 NULL NULL
+NULL NULL 348 val_348 NULL NULL
+NULL NULL 35 val_35 NULL NULL
+NULL NULL 35 val_35 NULL NULL
+NULL NULL 35 val_35 NULL NULL
+NULL NULL 351 val_351 NULL NULL
+NULL NULL 353 val_353 NULL NULL
+NULL NULL 353 val_353 NULL NULL
+NULL NULL 356 val_356 NULL NULL
+NULL NULL 360 val_360 NULL NULL
+NULL NULL 362 val_362 NULL NULL
+NULL NULL 364 val_364 NULL NULL
+NULL NULL 365 val_365 NULL NULL
+NULL NULL 366 val_366 NULL NULL
+NULL NULL 367 val_367 NULL NULL
+NULL NULL 367 val_367 NULL NULL
+NULL NULL 368 val_368 NULL NULL
+NULL NULL 369 val_369 NULL NULL
+NULL NULL 369 val_369 NULL NULL
+NULL NULL 369 val_369 NULL NULL
+NULL NULL 37 val_37 NULL NULL
+NULL NULL 37 val_37 NULL NULL
+NULL NULL 373 val_373 NULL NULL
+NULL NULL 374 val_374 NULL NULL
+NULL NULL 375 val_375 NULL NULL
+NULL NULL 377 val_377 NULL NULL
+NULL NULL 378 val_378 NULL NULL
+NULL NULL 379 val_379 NULL NULL
+NULL NULL 382 val_382 NULL NULL
+NULL NULL 382 val_382 NULL NULL
+NULL NULL 384 val_384 NULL NULL
+NULL NULL 384 val_384 NULL NULL
+NULL NULL 384 val_384 NULL NULL
+NULL NULL 386 val_386 NULL NULL
+NULL NULL 389 val_389 NULL NULL
+NULL NULL 392 val_392 NULL NULL
+NULL NULL 393 val_393 NULL NULL
+NULL NULL 394 val_394 NULL NULL
+NULL NULL 395 val_395 NULL NULL
+NULL NULL 395 val_395 NULL NULL
+NULL NULL 396 val_396 NULL NULL
+NULL NULL 396 val_396 NULL NULL
+NULL NULL 396 val_396 NULL NULL
+NULL NULL 397 val_397 NULL NULL
+NULL NULL 397 val_397 NULL NULL
+NULL NULL 399 val_399 NULL NULL
+NULL NULL 399 val_399 NULL NULL
+NULL NULL 4 val_4 4 val_4
+NULL NULL 400 val_400 NULL NULL
+NULL NULL 401 val_401 NULL NULL
+NULL NULL 401 val_401 NULL NULL
+NULL NULL 401 val_401 NULL NULL
+NULL NULL 401 val_401 NULL NULL
+NULL NULL 401 val_401 NULL NULL
+NULL NULL 402 val_402 NULL NULL
+NULL NULL 403 val_403 NULL NULL
+NULL NULL 403 val_403 NULL NULL
+NULL NULL 403 val_403 NULL NULL
+NULL NULL 404 val_404 NULL NULL
+NULL NULL 404 val_404 NULL NULL
+NULL NULL 406 val_406 NULL NULL
+NULL NULL 406 val_406 NULL NULL
+NULL NULL 406 val_406 NULL NULL
+NULL NULL 406 val_406 NULL NULL
+NULL NULL 407 val_407 NULL NULL
+NULL NULL 409 val_409 NULL NULL
+NULL NULL 409 val_409 NULL NULL
+NULL NULL 409 val_409 NULL NULL
+NULL NULL 41 val_41 NULL NULL
+NULL NULL 411 val_411 NULL NULL
+NULL NULL 413 val_413 NULL NULL
+NULL NULL 413 val_413 NULL NULL
+NULL NULL 414 val_414 NULL NULL
+NULL NULL 414 val_414 NULL NULL
+NULL NULL 417 val_417 NULL NULL
+NULL NULL 417 val_417 NULL NULL
+NULL NULL 417 val_417 NULL NULL
+NULL NULL 418 val_418 NULL NULL
+NULL NULL 419 val_419 NULL NULL
+NULL NULL 42 val_42 NULL NULL
+NULL NULL 42 val_42 NULL NULL
+NULL NULL 421 val_421 NULL NULL
+NULL NULL 424 val_424 NULL NULL
+NULL NULL 424 val_424 NULL NULL
+NULL NULL 427 val_427 NULL NULL
+NULL NULL 429 val_429 NULL NULL
+NULL NULL 429 val_429 NULL NULL
+NULL NULL 43 val_43 NULL NULL
+NULL NULL 430 val_430 NULL NULL
+NULL NULL 430 val_430 NULL NULL
+NULL NULL 430 val_430 NULL NULL
+NULL NULL 431 val_431 NULL NULL
+NULL NULL 431 val_431 NULL NULL
+NULL NULL 431 val_431 NULL NULL
+NULL NULL 432 val_432 NULL NULL
+NULL NULL 435 val_435 NULL NULL
+NULL NULL 436 val_436 NULL NULL
+NULL NULL 437 val_437 NULL NULL
+NULL NULL 438 val_438 NULL NULL
+NULL NULL 438 val_438 NULL NULL
+NULL NULL 438 val_438 NULL NULL
+NULL NULL 439 val_439 NULL NULL
+NULL NULL 439 val_439 NULL NULL
+NULL NULL 44 val_44 NULL NULL
+NULL NULL 443 val_443 NULL NULL
+NULL NULL 444 val_444 NULL NULL
+NULL NULL 446 val_446 NULL NULL
+NULL NULL 448 val_448 NULL NULL
+NULL NULL 449 val_449 NULL NULL
+NULL NULL 452 val_452 NULL NULL
+NULL NULL 453 val_453 NULL NULL
+NULL NULL 454 val_454 NULL NULL
+NULL NULL 454 val_454 NULL NULL
+NULL NULL 454 val_454 NULL NULL
+NULL NULL 455 val_455 NULL NULL
+NULL NULL 457 val_457 NULL NULL
+NULL NULL 458 val_458 NULL NULL
+NULL NULL 458 val_458 NULL NULL
+NULL NULL 459 val_459 NULL NULL
+NULL NULL 459 val_459 NULL NULL
+NULL NULL 460 val_460 NULL NULL
+NULL NULL 462 val_462 NULL NULL
+NULL NULL 462 val_462 NULL NULL
+NULL NULL 463 val_463 NULL NULL
+NULL NULL 463 val_463 NULL NULL
+NULL NULL 466 val_466 NULL NULL
+NULL NULL 466 val_466 NULL NULL
+NULL NULL 466 val_466 NULL NULL
+NULL NULL 467 val_467 NULL NULL
+NULL NULL 468 val_468 NULL NULL
+NULL NULL 468 val_468 NULL NULL
+NULL NULL 468 val_468 NULL NULL
+NULL NULL 468 val_468 NULL NULL
+NULL NULL 469 val_469 NULL NULL
+NULL NULL 469 val_469 NULL NULL
+NULL NULL 469 val_469 NULL NULL
+NULL NULL 469 val_469 NULL NULL
+NULL NULL 469 val_469 NULL NULL
+NULL NULL 47 val_47 NULL NULL
+NULL NULL 470 val_470 NULL NULL
+NULL NULL 472 val_472 NULL NULL
+NULL NULL 475 val_475 NULL NULL
+NULL NULL 477 val_477 NULL NULL
+NULL NULL 478 val_478 NULL NULL
+NULL NULL 478 val_478 NULL NULL
+NULL NULL 479 val_479 NULL NULL
+NULL NULL 480 val_480 NULL NULL
+NULL NULL 480 val_480 NULL NULL
+NULL NULL 480 val_480 NULL NULL
+NULL NULL 481 val_481 NULL NULL
+NULL NULL 482 val_482 NULL NULL
+NULL NULL 483 val_483 NULL NULL
+NULL NULL 484 val_484 NULL NULL
+NULL NULL 485 val_485 NULL NULL
+NULL NULL 487 val_487 NULL NULL
+NULL NULL 489 val_489 NULL NULL
+NULL NULL 489 val_489 NULL NULL
+NULL NULL 489 val_489 NULL NULL
+NULL NULL 489 val_489 NULL NULL
+NULL NULL 490 val_490 NULL NULL
+NULL NULL 491 val_491 NULL NULL
+NULL NULL 492 val_492 NULL NULL
+NULL NULL 492 val_492 NULL NULL
+NULL NULL 493 val_493 NULL NULL
+NULL NULL 494 val_494 NULL NULL
+NULL NULL 495 val_495 NULL NULL
+NULL NULL 496 val_496 NULL NULL
+NULL NULL 497 val_497 NULL NULL
+NULL NULL 498 val_498 NULL NULL
+NULL NULL 498 val_498 NULL NULL
+NULL NULL 498 val_498 NULL NULL
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 51 val_51 NULL NULL
+NULL NULL 51 val_51 NULL NULL
+NULL NULL 53 val_53 NULL NULL
+NULL NULL 54 val_54 NULL NULL
+NULL NULL 57 val_57 NULL NULL
+NULL NULL 58 val_58 NULL NULL
+NULL NULL 58 val_58 NULL NULL
+NULL NULL 64 val_64 NULL NULL
+NULL NULL 65 val_65 NULL NULL
+NULL NULL 66 val_66 NULL NULL
+NULL NULL 67 val_67 NULL NULL
+NULL NULL 67 val_67 NULL NULL
+NULL NULL 69 val_69 NULL NULL
+NULL NULL 70 val_70 NULL NULL
+NULL NULL 70 val_70 NULL NULL
+NULL NULL 70 val_70 NULL NULL
+NULL NULL 72 val_72 NULL NULL
+NULL NULL 72 val_72 NULL NULL
+NULL NULL 74 val_74 NULL NULL
+NULL NULL 76 val_76 NULL NULL
+NULL NULL 76 val_76 NULL NULL
+NULL NULL 77 val_77 NULL NULL
+NULL NULL 78 val_78 NULL NULL
+NULL NULL 8 val_8 8 val_8
+NULL NULL 80 val_80 NULL NULL
+NULL NULL 82 val_82 NULL NULL
+NULL NULL 83 val_83 NULL NULL
+NULL NULL 83 val_83 NULL NULL
+NULL NULL 84 val_84 NULL NULL
+NULL NULL 84 val_84 NULL NULL
+NULL NULL 85 val_85 NULL NULL
+NULL NULL 86 val_86 NULL NULL
+NULL NULL 87 val_87 NULL NULL
+NULL NULL 9 val_9 9 val_9
+NULL NULL 90 val_90 NULL NULL
+NULL NULL 90 val_90 NULL NULL
+NULL NULL 90 val_90 NULL NULL
+NULL NULL 92 val_92 NULL NULL
+NULL NULL 95 val_95 NULL NULL
+NULL NULL 95 val_95 NULL NULL
+NULL NULL 96 val_96 NULL NULL
+NULL NULL 97 val_97 NULL NULL
+NULL NULL 97 val_97 NULL NULL
+NULL NULL 98 val_98 NULL NULL
+NULL NULL 98 val_98 NULL NULL
+PREHOOK: query: explain
+SELECT * FROM src src1 RIGHT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT * FROM src src1 RIGHT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 10) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: src3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ Right Outer Join1 to 2
+ filter predicates:
+ 0
+ 1 {(KEY.reducesinkkey0 > 10)}
+ 2 {(KEY.reducesinkkey0 < 10)}
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+ sort order: ++++++
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT * FROM src src1 RIGHT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src src1 RIGHT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 0 val_0 0 val_0
+NULL NULL 2 val_2 2 val_2
+NULL NULL 4 val_4 4 val_4
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 5 val_5 5 val_5
+NULL NULL 8 val_8 8 val_8
+NULL NULL 9 val_9 9 val_9
+NULL NULL NULL NULL 10 val_10
+NULL NULL NULL NULL 100 val_100
+NULL NULL NULL NULL 100 val_100
+NULL NULL NULL NULL 103 val_103
+NULL NULL NULL NULL 103 val_103
+NULL NULL NULL NULL 104 val_104
+NULL NULL NULL NULL 104 val_104
+NULL NULL NULL NULL 105 val_105
+NULL NULL NULL NULL 11 val_11
+NULL NULL NULL NULL 111 val_111
+NULL NULL NULL NULL 113 val_113
+NULL NULL NULL NULL 113 val_113
+NULL NULL NULL NULL 114 val_114
+NULL NULL NULL NULL 116 val_116
+NULL NULL NULL NULL 118 val_118
+NULL NULL NULL NULL 118 val_118
+NULL NULL NULL NULL 119 val_119
+NULL NULL NULL NULL 119 val_119
+NULL NULL NULL NULL 119 val_119
+NULL NULL NULL NULL 12 val_12
+NULL NULL NULL NULL 12 val_12
+NULL NULL NULL NULL 120 val_120
+NULL NULL NULL NULL 120 val_120
+NULL NULL NULL NULL 125 val_125
+NULL NULL NULL NULL 125 val_125
+NULL NULL NULL NULL 126 val_126
+NULL NULL NULL NULL 128 val_128
+NULL NULL NULL NULL 128 val_128
+NULL NULL NULL NULL 128 val_128
+NULL NULL NULL NULL 129 val_129
+NULL NULL NULL NULL 129 val_129
+NULL NULL NULL NULL 131 val_131
+NULL NULL NULL NULL 133 val_133
+NULL NULL NULL NULL 134 val_134
+NULL NULL NULL NULL 134 val_134
+NULL NULL NULL NULL 136 val_136
+NULL NULL NULL NULL 137 val_137
+NULL NULL NULL NULL 137 val_137
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 138 val_138
+NULL NULL NULL NULL 143 val_143
+NULL NULL NULL NULL 145 val_145
+NULL NULL NULL NULL 146 val_146
+NULL NULL NULL NULL 146 val_146
+NULL NULL NULL NULL 149 val_149
+NULL NULL NULL NULL 149 val_149
+NULL NULL NULL NULL 15 val_15
+NULL NULL NULL NULL 15 val_15
+NULL NULL NULL NULL 150 val_150
+NULL NULL NULL NULL 152 val_152
+NULL NULL NULL NULL 152 val_152
+NULL NULL NULL NULL 153 val_153
+NULL NULL NULL NULL 155 val_155
+NULL NULL NULL NULL 156 val_156
+NULL NULL NULL NULL 157 val_157
+NULL NULL NULL NULL 158 val_158
+NULL NULL NULL NULL 160 val_160
+NULL NULL NULL NULL 162 val_162
+NULL NULL NULL NULL 163 val_163
+NULL NULL NULL NULL 164 val_164
+NULL NULL NULL NULL 164 val_164
+NULL NULL NULL NULL 165 val_165
+NULL NULL NULL NULL 165 val_165
+NULL NULL NULL NULL 166 val_166
+NULL NULL NULL NULL 167 val_167
+NULL NULL NULL NULL 167 val_167
+NULL NULL NULL NULL 167 val_167
+NULL NULL NULL NULL 168 val_168
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 169 val_169
+NULL NULL NULL NULL 17 val_17
+NULL NULL NULL NULL 170 val_170
+NULL NULL NULL NULL 172 val_172
+NULL NULL NULL NULL 172 val_172
+NULL NULL NULL NULL 174 val_174
+NULL NULL NULL NULL 174 val_174
+NULL NULL NULL NULL 175 val_175
+NULL NULL NULL NULL 175 val_175
+NULL NULL NULL NULL 176 val_176
+NULL NULL NULL NULL 176 val_176
+NULL NULL NULL NULL 177 val_177
+NULL NULL NULL NULL 178 val_178
+NULL NULL NULL NULL 179 val_179
+NULL NULL NULL NULL 179 val_179
+NULL NULL NULL NULL 18 val_18
+NULL NULL NULL NULL 18 val_18
+NULL NULL NULL NULL 180 val_180
+NULL NULL NULL NULL 181 val_181
+NULL NULL NULL NULL 183 val_183
+NULL NULL NULL NULL 186 val_186
+NULL NULL NULL NULL 187 val_187
+NULL NULL NULL NULL 187 val_187
+NULL NULL NULL NULL 187 val_187
+NULL NULL NULL NULL 189 val_189
+NULL NULL NULL NULL 19 val_19
+NULL NULL NULL NULL 190 val_190
+NULL NULL NULL NULL 191 val_191
+NULL NULL NULL NULL 191 val_191
+NULL NULL NULL NULL 192 val_192
+NULL NULL NULL NULL 193 val_193
+NULL NULL NULL NULL 193 val_193
+NULL NULL NULL NULL 193 val_193
+NULL NULL NULL NULL 194 val_194
+NULL NULL NULL NULL 195 val_195
+NULL NULL NULL NULL 195 val_195
+NULL NULL NULL NULL 196 val_196
+NULL NULL NULL NULL 197 val_197
+NULL NULL NULL NULL 197 val_197
+NULL NULL NULL NULL 199 val_199
+NULL NULL NULL NULL 199 val_199
+NULL NULL NULL NULL 199 val_199
+NULL NULL NULL NULL 20 val_20
+NULL NULL NULL NULL 200 val_200
+NULL NULL NULL NULL 200 val_200
+NULL NULL NULL NULL 201 val_201
+NULL NULL NULL NULL 202 val_202
+NULL NULL NULL NULL 203 val_203
+NULL NULL NULL NULL 203 val_203
+NULL NULL NULL NULL 205 val_205
+NULL NULL NULL NULL 205 val_205
+NULL NULL NULL NULL 207 val_207
+NULL NULL NULL NULL 207 val_207
+NULL NULL NULL NULL 208 val_208
+NULL NULL NULL NULL 208 val_208
+NULL NULL NULL NULL 208 val_208
+NULL NULL NULL NULL 209 val_209
+NULL NULL NULL NULL 209 val_209
+NULL NULL NULL NULL 213 val_213
+NULL NULL NULL NULL 213 val_213
+NULL NULL NULL NULL 214 val_214
+NULL NULL NULL NULL 216 val_216
+NULL NULL NULL NULL 216 val_216
+NULL NULL NULL NULL 217 val_217
+NULL NULL NULL NULL 217 val_217
+NULL NULL NULL NULL 218 val_218
+NULL NULL NULL NULL 219 val_219
+NULL NULL NULL NULL 219 val_219
+NULL NULL NULL NULL 221 val_221
+NULL NULL NULL NULL 221 val_221
+NULL NULL NULL NULL 222 val_222
+NULL NULL NULL NULL 223 val_223
+NULL NULL NULL NULL 223 val_223
+NULL NULL NULL NULL 224 val_224
+NULL NULL NULL NULL 224 val_224
+NULL NULL NULL NULL 226 val_226
+NULL NULL NULL NULL 228 val_228
+NULL NULL NULL NULL 229 val_229
+NULL NULL NULL NULL 229 val_229
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 230 val_230
+NULL NULL NULL NULL 233 val_233
+NULL NULL NULL NULL 233 val_233
+NULL NULL NULL NULL 235 val_235
+NULL NULL NULL NULL 237 val_237
+NULL NULL NULL NULL 237 val_237
+NULL NULL NULL NULL 238 val_238
+NULL NULL NULL NULL 238 val_238
+NULL NULL NULL NULL 239 val_239
+NULL NULL NULL NULL 239 val_239
+NULL NULL NULL NULL 24 val_24
+NULL NULL NULL NULL 24 val_24
+NULL NULL NULL NULL 241 val_241
+NULL NULL NULL NULL 242 val_242
+NULL NULL NULL NULL 242 val_242
+NULL NULL NULL NULL 244 val_244
+NULL NULL NULL NULL 247 val_247
+NULL NULL NULL NULL 248 val_248
+NULL NULL NULL NULL 249 val_249
+NULL NULL NULL NULL 252 val_252
+NULL NULL NULL NULL 255 val_255
+NULL NULL NULL NULL 255 val_255
+NULL NULL NULL NULL 256 val_256
+NULL NULL NULL NULL 256 val_256
+NULL NULL NULL NULL 257 val_257
+NULL NULL NULL NULL 258 val_258
+NULL NULL NULL NULL 26 val_26
+NULL NULL NULL NULL 26 val_26
+NULL NULL NULL NULL 260 val_260
+NULL NULL NULL NULL 262 val_262
+NULL NULL NULL NULL 263 val_263
+NULL NULL NULL NULL 265 val_265
+NULL NULL NULL NULL 265 val_265
+NULL NULL NULL NULL 266 val_266
+NULL NULL NULL NULL 27 val_27
+NULL NULL NULL NULL 272 val_272
+NULL NULL NULL NULL 272 val_272
+NULL NULL NULL NULL 273 val_273
+NULL NULL NULL NULL 273 val_273
+NULL NULL NULL NULL 273 val_273
+NULL NULL NULL NULL 274 val_274
+NULL NULL NULL NULL 275 val_275
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 277 val_277
+NULL NULL NULL NULL 278 val_278
+NULL NULL NULL NULL 278 val_278
+NULL NULL NULL NULL 28 val_28
+NULL NULL NULL NULL 280 val_280
+NULL NULL NULL NULL 280 val_280
+NULL NULL NULL NULL 281 val_281
+NULL NULL NULL NULL 281 val_281
+NULL NULL NULL NULL 282 val_282
+NULL NULL NULL NULL 282 val_282
+NULL NULL NULL NULL 283 val_283
+NULL NULL NULL NULL 284 val_284
+NULL NULL NULL NULL 285 val_285
+NULL NULL NULL NULL 286 val_286
+NULL NULL NULL NULL 287 val_287
+NULL NULL NULL NULL 288 val_288
+NULL NULL NULL NULL 288 val_288
+NULL NULL NULL NULL 289 val_289
+NULL NULL NULL NULL 291 val_291
+NULL NULL NULL NULL 292 val_292
+NULL NULL NULL NULL 296 val_296
+NULL NULL NULL NULL 298 val_298
+NULL NULL NULL NULL 298 val_298
+NULL NULL NULL NULL 298 val_298
+NULL NULL NULL NULL 30 val_30
+NULL NULL NULL NULL 302 val_302
+NULL NULL NULL NULL 305 val_305
+NULL NULL NULL NULL 306 val_306
+NULL NULL NULL NULL 307 val_307
+NULL NULL NULL NULL 307 val_307
+NULL NULL NULL NULL 308 val_308
+NULL NULL NULL NULL 309 val_309
+NULL NULL NULL NULL 309 val_309
+NULL NULL NULL NULL 310 val_310
+NULL NULL NULL NULL 311 val_311
+NULL NULL NULL NULL 311 val_311
+NULL NULL NULL NULL 311 val_311
+NULL NULL NULL NULL 315 val_315
+NULL NULL NULL NULL 316 val_316
+NULL NULL NULL NULL 316 val_316
+NULL NULL NULL NULL 316 val_316
+NULL NULL NULL NULL 317 val_317
+NULL NULL NULL NULL 317 val_317
+NULL NULL NULL NULL 318 val_318
+NULL NULL NULL NULL 318 val_318
+NULL NULL NULL NULL 318 val_318
+NULL NULL NULL NULL 321 val_321
+NULL NULL NULL NULL 321 val_321
+NULL NULL NULL NULL 322 val_322
+NULL NULL NULL NULL 322 val_322
+NULL NULL NULL NULL 323 val_323
+NULL NULL NULL NULL 325 val_325
+NULL NULL NULL NULL 325 val_325
+NULL NULL NULL NULL 327 val_327
+NULL NULL NULL NULL 327 val_327
+NULL NULL NULL NULL 327 val_327
+NULL NULL NULL NULL 33 val_33
+NULL NULL NULL NULL 331 val_331
+NULL NULL NULL NULL 331 val_331
+NULL NULL NULL NULL 332 val_332
+NULL NULL NULL NULL 333 val_333
+NULL NULL NULL NULL 333 val_333
+NULL NULL NULL NULL 335 val_335
+NULL NULL NULL NULL 336 val_336
+NULL NULL NULL NULL 338 val_338
+NULL NULL NULL NULL 339 val_339
+NULL NULL NULL NULL 34 val_34
+NULL NULL NULL NULL 341 val_341
+NULL NULL NULL NULL 342 val_342
+NULL NULL NULL NULL 342 val_342
+NULL NULL NULL NULL 344 val_344
+NULL NULL NULL NULL 344 val_344
+NULL NULL NULL NULL 345 val_345
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 348 val_348
+NULL NULL NULL NULL 35 val_35
+NULL NULL NULL NULL 35 val_35
+NULL NULL NULL NULL 35 val_35
+NULL NULL NULL NULL 351 val_351
+NULL NULL NULL NULL 353 val_353
+NULL NULL NULL NULL 353 val_353
+NULL NULL NULL NULL 356 val_356
+NULL NULL NULL NULL 360 val_360
+NULL NULL NULL NULL 362 val_362
+NULL NULL NULL NULL 364 val_364
+NULL NULL NULL NULL 365 val_365
+NULL NULL NULL NULL 366 val_366
+NULL NULL NULL NULL 367 val_367
+NULL NULL NULL NULL 367 val_367
+NULL NULL NULL NULL 368 val_368
+NULL NULL NULL NULL 369 val_369
+NULL NULL NULL NULL 369 val_369
+NULL NULL NULL NULL 369 val_369
+NULL NULL NULL NULL 37 val_37
+NULL NULL NULL NULL 37 val_37
+NULL NULL NULL NULL 373 val_373
+NULL NULL NULL NULL 374 val_374
+NULL NULL NULL NULL 375 val_375
+NULL NULL NULL NULL 377 val_377
+NULL NULL NULL NULL 378 val_378
+NULL NULL NULL NULL 379 val_379
+NULL NULL NULL NULL 382 val_382
+NULL NULL NULL NULL 382 val_382
+NULL NULL NULL NULL 384 val_384
+NULL NULL NULL NULL 384 val_384
+NULL NULL NULL NULL 384 val_384
+NULL NULL NULL NULL 386 val_386
+NULL NULL NULL NULL 389 val_389
+NULL NULL NULL NULL 392 val_392
+NULL NULL NULL NULL 393 val_393
+NULL NULL NULL NULL 394 val_394
+NULL NULL NULL NULL 395 val_395
+NULL NULL NULL NULL 395 val_395
+NULL NULL NULL NULL 396 val_396
+NULL NULL NULL NULL 396 val_396
+NULL NULL NULL NULL 396 val_396
+NULL NULL NULL NULL 397 val_397
+NULL NULL NULL NULL 397 val_397
+NULL NULL NULL NULL 399 val_399
+NULL NULL NULL NULL 399 val_399
+NULL NULL NULL NULL 400 val_400
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 401 val_401
+NULL NULL NULL NULL 402 val_402
+NULL NULL NULL NULL 403 val_403
+NULL NULL NULL NULL 403 val_403
+NULL NULL NULL NULL 403 val_403
+NULL NULL NULL NULL 404 val_404
+NULL NULL NULL NULL 404 val_404
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 406 val_406
+NULL NULL NULL NULL 407 val_407
+NULL NULL NULL NULL 409 val_409
+NULL NULL NULL NULL 409 val_409
+NULL NULL NULL NULL 409 val_409
+NULL NULL NULL NULL 41 val_41
+NULL NULL NULL NULL 411 val_411
+NULL NULL NULL NULL 413 val_413
+NULL NULL NULL NULL 413 val_413
+NULL NULL NULL NULL 414 val_414
+NULL NULL NULL NULL 414 val_414
+NULL NULL NULL NULL 417 val_417
+NULL NULL NULL NULL 417 val_417
+NULL NULL NULL NULL 417 val_417
+NULL NULL NULL NULL 418 val_418
+NULL NULL NULL NULL 419 val_419
+NULL NULL NULL NULL 42 val_42
+NULL NULL NULL NULL 42 val_42
+NULL NULL NULL NULL 421 val_421
+NULL NULL NULL NULL 424 val_424
+NULL NULL NULL NULL 424 val_424
+NULL NULL NULL NULL 427 val_427
+NULL NULL NULL NULL 429 val_429
+NULL NULL NULL NULL 429 val_429
+NULL NULL NULL NULL 43 val_43
+NULL NULL NULL NULL 430 val_430
+NULL NULL NULL NULL 430 val_430
+NULL NULL NULL NULL 430 val_430
+NULL NULL NULL NULL 431 val_431
+NULL NULL NULL NULL 431 val_431
+NULL NULL NULL NULL 431 val_431
+NULL NULL NULL NULL 432 val_432
+NULL NULL NULL NULL 435 val_435
+NULL NULL NULL NULL 436 val_436
+NULL NULL NULL NULL 437 val_437
+NULL NULL NULL NULL 438 val_438
+NULL NULL NULL NULL 438 val_438
+NULL NULL NULL NULL 438 val_438
+NULL NULL NULL NULL 439 val_439
+NULL NULL NULL NULL 439 val_439
+NULL NULL NULL NULL 44 val_44
+NULL NULL NULL NULL 443 val_443
+NULL NULL NULL NULL 444 val_444
+NULL NULL NULL NULL 446 val_446
+NULL NULL NULL NULL 448 val_448
+NULL NULL NULL NULL 449 val_449
+NULL NULL NULL NULL 452 val_452
+NULL NULL NULL NULL 453 val_453
+NULL NULL NULL NULL 454 val_454
+NULL NULL NULL NULL 454 val_454
+NULL NULL NULL NULL 454 val_454
+NULL NULL NULL NULL 455 val_455
+NULL NULL NULL NULL 457 val_457
+NULL NULL NULL NULL 458 val_458
+NULL NULL NULL NULL 458 val_458
+NULL NULL NULL NULL 459 val_459
+NULL NULL NULL NULL 459 val_459
+NULL NULL NULL NULL 460 val_460
+NULL NULL NULL NULL 462 val_462
+NULL NULL NULL NULL 462 val_462
+NULL NULL NULL NULL 463 val_463
+NULL NULL NULL NULL 463 val_463
+NULL NULL NULL NULL 466 val_466
+NULL NULL NULL NULL 466 val_466
+NULL NULL NULL NULL 466 val_466
+NULL NULL NULL NULL 467 val_467
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 468 val_468
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 469 val_469
+NULL NULL NULL NULL 47 val_47
+NULL NULL NULL NULL 470 val_470
+NULL NULL NULL NULL 472 val_472
+NULL NULL NULL NULL 475 val_475
+NULL NULL NULL NULL 477 val_477
+NULL NULL NU
<TRUNCATED>
[31/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/ctas.q.out b/ql/src/test/results/clientpositive/llap/ctas.q.out
new file mode 100644
index 0000000..93fc978
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/ctas.q.out
@@ -0,0 +1,930 @@
+PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
+-- SORT_QUERY_RESULTS
+
+create table nzhang_Tmp(a int, b string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_Tmp
+POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
+-- SORT_QUERY_RESULTS
+
+create table nzhang_Tmp(a int, b string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_Tmp
+PREHOOK: query: select * from nzhang_Tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_tmp
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_Tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_tmp
+#### A masked pattern was here ####
+PREHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-4 depends on stages: Stage-2, Stage-0
+ Stage-3 depends on stages: Stage-4
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_CTAS1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-4
+ Create Table Operator:
+ Create Table
+ columns: k string, value string
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_CTAS1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_CTAS1
+POSTHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_CTAS1
+PREHOOK: query: select * from nzhang_CTAS1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_ctas1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_CTAS1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_ctas1
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+PREHOOK: query: describe formatted nzhang_CTAS1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_ctas1
+POSTHOOK: query: describe formatted nzhang_CTAS1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_ctas1
+# col_name data_type comment
+
+k string
+value string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 10
+ rawDataSize 96
+ totalSize 106
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain create table nzhang_ctas2 as select * from src sort by key, value limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: explain create table nzhang_ctas2 as select * from src sort by key, value limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-4 depends on stages: Stage-2, Stage-0
+ Stage-3 depends on stages: Stage-4
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_ctas2
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-4
+ Create Table Operator:
+ Create Table
+ columns: key string, value string
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_ctas2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_ctas2
+POSTHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_ctas2
+PREHOOK: query: select * from nzhang_ctas2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_ctas2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_ctas2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_ctas2
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+PREHOOK: query: describe formatted nzhang_CTAS2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_ctas2
+POSTHOOK: query: describe formatted nzhang_CTAS2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_ctas2
+# col_name data_type comment
+
+key string
+value string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 10
+ rawDataSize 96
+ totalSize 106
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-4 depends on stages: Stage-2, Stage-0
+ Stage-3 depends on stages: Stage-4
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: (key / 2) (type: double), concat(value, '_con') (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: double), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: double), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+ name: default.nzhang_ctas3
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-4
+ Create Table Operator:
+ Create Table
+ columns: half_key double, conb string
+ input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+ serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+ name: default.nzhang_ctas3
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_ctas3
+POSTHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_ctas3
+PREHOOK: query: select * from nzhang_ctas3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_ctas3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_ctas3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_ctas3
+#### A masked pattern was here ####
+0.0 val_0_con
+0.0 val_0_con
+0.0 val_0_con
+1.0 val_2_con
+2.0 val_4_con
+2.5 val_5_con
+2.5 val_5_con
+2.5 val_5_con
+4.0 val_8_con
+4.5 val_9_con
+PREHOOK: query: describe formatted nzhang_CTAS3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_ctas3
+POSTHOOK: query: describe formatted nzhang_CTAS3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_ctas3
+# col_name data_type comment
+
+half_key double
+conb string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 10
+ rawDataSize 120
+ totalSize 199
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2
+POSTHOOK: type: CREATETABLE
+STAGE DEPENDENCIES:
+
+STAGE PLANS:
+PREHOOK: query: create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2
+POSTHOOK: type: CREATETABLE
+PREHOOK: query: select * from nzhang_ctas3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_ctas3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_ctas3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_ctas3
+#### A masked pattern was here ####
+0.0 val_0_con
+0.0 val_0_con
+0.0 val_0_con
+1.0 val_2_con
+2.0 val_4_con
+2.5 val_5_con
+2.5 val_5_con
+2.5 val_5_con
+4.0 val_8_con
+4.5 val_9_con
+PREHOOK: query: describe formatted nzhang_CTAS3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_ctas3
+POSTHOOK: query: describe formatted nzhang_CTAS3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_ctas3
+# col_name data_type comment
+
+half_key double
+conb string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 10
+ rawDataSize 120
+ totalSize 199
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-4 depends on stages: Stage-2, Stage-0
+ Stage-3 depends on stages: Stage-4
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_ctas4
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-4
+ Create Table Operator:
+ Create Table
+ columns: key string, value string
+ field delimiter: ,
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_ctas4
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_ctas4
+POSTHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_ctas4
+PREHOOK: query: select * from nzhang_ctas4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_ctas4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_ctas4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_ctas4
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+PREHOOK: query: describe formatted nzhang_CTAS4
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_ctas4
+POSTHOOK: query: describe formatted nzhang_CTAS4
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_ctas4
+# col_name data_type comment
+
+key string
+value string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 10
+ rawDataSize 96
+ totalSize 106
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ field.delim ,
+ serialization.format ,
+PREHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+ABSTRACT SYNTAX TREE:
+
+TOK_CREATETABLE
+ TOK_TABNAME
+ nzhang_ctas5
+ TOK_LIKETABLE
+ TOK_TABLEROWFORMAT
+ TOK_SERDEPROPS
+ TOK_TABLEROWFORMATFIELD
+ ','
+ TOK_TABLEROWFORMATLINES
+ '\012'
+ TOK_FILEFORMAT_GENERIC
+ textfile
+ TOK_QUERY
+ TOK_FROM
+ TOK_TABREF
+ TOK_TABNAME
+ src
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_TABLE_OR_COL
+ key
+ TOK_SELEXPR
+ TOK_TABLE_OR_COL
+ value
+ TOK_SORTBY
+ TOK_TABSORTCOLNAMEASC
+ TOK_TABLE_OR_COL
+ key
+ TOK_TABSORTCOLNAMEASC
+ TOK_TABLE_OR_COL
+ value
+ TOK_LIMIT
+ 10
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-4 depends on stages: Stage-2, Stage-0
+ Stage-3 depends on stages: Stage-4
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: src
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src
+ name: default.src
+ Truncated Path -> Alias:
+ /src [src]
+ Reducer 2
+ Execution mode: llap
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ auto parallelism: false
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns key,value
+ columns.types string:string
+ field.delim ,
+ line.delim
+
+ name default.nzhang_ctas5
+ serialization.format ,
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_ctas5
+ TotalFiles: 1
+ GatherStats: true
+ MultiFileSpray: false
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-4
+ Create Table Operator:
+ Create Table
+ columns: key string, value string
+ field delimiter: ,
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ line delimiter:
+
+ output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_ctas5
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+#### A masked pattern was here ####
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_ctas5
+POSTHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_ctas5
+PREHOOK: query: create table nzhang_ctas6 (key string, `to` string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_ctas6
+POSTHOOK: query: create table nzhang_ctas6 (key string, `to` string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_ctas6
+PREHOOK: query: insert overwrite table nzhang_ctas6 select key, value from src tablesample (10 rows)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@nzhang_ctas6
+POSTHOOK: query: insert overwrite table nzhang_ctas6 select key, value from src tablesample (10 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@nzhang_ctas6
+POSTHOOK: Lineage: nzhang_ctas6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas6.to SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@nzhang_ctas6
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_ctas7
+POSTHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@nzhang_ctas6
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_ctas7
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/custom_input_output_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/custom_input_output_format.q.out b/ql/src/test/results/clientpositive/llap/custom_input_output_format.q.out
new file mode 100644
index 0000000..662ed1a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/custom_input_output_format.q.out
@@ -0,0 +1,102 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE src1_rot13_iof(key STRING, value STRING)
+ STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat'
+ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src1_rot13_iof
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE src1_rot13_iof(key STRING, value STRING)
+ STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat'
+ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src1_rot13_iof
+PREHOOK: query: DESCRIBE EXTENDED src1_rot13_iof
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src1_rot13_iof
+POSTHOOK: query: DESCRIBE EXTENDED src1_rot13_iof
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src1_rot13_iof
+key string
+value string
+
+#### A masked pattern was here ####
+PREHOOK: query: SELECT * FROM src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+
+
+
+
+ val_165
+ val_193
+ val_265
+ val_27
+ val_409
+ val_484
+128
+146 val_146
+150 val_150
+213 val_213
+224
+238 val_238
+255 val_255
+273 val_273
+278 val_278
+311 val_311
+369
+401 val_401
+406 val_406
+66 val_66
+98 val_98
+PREHOOK: query: INSERT OVERWRITE TABLE src1_rot13_iof SELECT * FROM src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@src1_rot13_iof
+POSTHOOK: query: INSERT OVERWRITE TABLE src1_rot13_iof SELECT * FROM src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@src1_rot13_iof
+POSTHOOK: Lineage: src1_rot13_iof.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src1_rot13_iof.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM src1_rot13_iof
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1_rot13_iof
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM src1_rot13_iof
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1_rot13_iof
+#### A masked pattern was here ####
+
+
+
+
+ val_165
+ val_193
+ val_265
+ val_27
+ val_409
+ val_484
+128
+146 val_146
+150 val_150
+213 val_213
+224
+238 val_238
+255 val_255
+273 val_273
+278 val_278
+311 val_311
+369
+401 val_401
+406 val_406
+66 val_66
+98 val_98
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/delete_all_non_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/delete_all_non_partitioned.q.out b/ql/src/test/results/clientpositive/llap/delete_all_non_partitioned.q.out
new file mode 100644
index 0000000..38ce075
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/delete_all_non_partitioned.q.out
@@ -0,0 +1,52 @@
+PREHOOK: query: create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_danp
+POSTHOOK: query: create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_danp
+PREHOOK: query: insert into table acid_danp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_danp
+POSTHOOK: query: insert into table acid_danp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_danp
+POSTHOOK: Lineage: acid_danp.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_danp.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select a,b from acid_danp order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_danp
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from acid_danp order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_danp
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa
+-1073051226 A34p7oRr2WvUJNf
+-1072910839 0iqrc5
+-1072081801 dPkN74F7
+-1072076362 2uLyD28144vklju213J1mr
+-1071480828 aw724t8c5558x2xneC624
+-1071363017 Anj0oF
+-1070883071 0ruyd6Y50JpdGRf6HqD
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
+PREHOOK: query: delete from acid_danp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_danp
+PREHOOK: Output: default@acid_danp
+POSTHOOK: query: delete from acid_danp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_danp
+POSTHOOK: Output: default@acid_danp
+PREHOOK: query: select a,b from acid_danp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_danp
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from acid_danp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_danp
+#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/delete_all_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/delete_all_partitioned.q.out b/ql/src/test/results/clientpositive/llap/delete_all_partitioned.q.out
new file mode 100644
index 0000000..90f8753
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/delete_all_partitioned.q.out
@@ -0,0 +1,86 @@
+PREHOOK: query: create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dap
+POSTHOOK: query: create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dap
+PREHOOK: query: insert into table acid_dap partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dap@ds=today
+POSTHOOK: query: insert into table acid_dap partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dap@ds=today
+POSTHOOK: Lineage: acid_dap PARTITION(ds=today).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dap PARTITION(ds=today).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: insert into table acid_dap partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 1000 order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dap@ds=tomorrow
+POSTHOOK: query: insert into table acid_dap partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 1000 order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dap@ds=tomorrow
+POSTHOOK: Lineage: acid_dap PARTITION(ds=tomorrow).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dap PARTITION(ds=tomorrow).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select a,b,ds from acid_dap order by a,b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dap
+PREHOOK: Input: default@acid_dap@ds=today
+PREHOOK: Input: default@acid_dap@ds=tomorrow
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b,ds from acid_dap order by a,b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dap
+POSTHOOK: Input: default@acid_dap@ds=today
+POSTHOOK: Input: default@acid_dap@ds=tomorrow
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa today
+-1073051226 A34p7oRr2WvUJNf today
+-1072910839 0iqrc5 today
+-1072081801 dPkN74F7 today
+-1072076362 2uLyD28144vklju213J1mr today
+-1071480828 aw724t8c5558x2xneC624 today
+-1071363017 Anj0oF today
+-1070883071 0ruyd6Y50JpdGRf6HqD today
+-1070551679 iUR3Q today
+-1069736047 k17Am8uPHWk02cEf1jet today
+6981 NULL tomorrow
+6981 1FNNhmiFLGw425NA13g tomorrow
+6981 4KhrrQ0nJ7bMNTvhSCA tomorrow
+6981 K630vaVf tomorrow
+6981 Y5x3JuI3M8jngv5N tomorrow
+6981 YdG61y00526u5 tomorrow
+6981 a3EhVU6Wuy7ycJ7wY7h2gv tomorrow
+6981 o4lvY20511w0EOX3P3I82p63 tomorrow
+6981 o5mb0QP5Y48Qd4vdB0 tomorrow
+6981 sF2CRfgt2K tomorrow
+PREHOOK: query: delete from acid_dap
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dap
+PREHOOK: Input: default@acid_dap@ds=today
+PREHOOK: Input: default@acid_dap@ds=tomorrow
+PREHOOK: Output: default@acid_dap@ds=today
+PREHOOK: Output: default@acid_dap@ds=tomorrow
+POSTHOOK: query: delete from acid_dap
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dap
+POSTHOOK: Input: default@acid_dap@ds=today
+POSTHOOK: Input: default@acid_dap@ds=tomorrow
+POSTHOOK: Output: default@acid_dap@ds=today
+POSTHOOK: Output: default@acid_dap@ds=tomorrow
+PREHOOK: query: select * from acid_dap
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dap
+PREHOOK: Input: default@acid_dap@ds=today
+PREHOOK: Input: default@acid_dap@ds=tomorrow
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_dap
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dap
+POSTHOOK: Input: default@acid_dap@ds=today
+POSTHOOK: Input: default@acid_dap@ds=tomorrow
+#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/delete_orig_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/delete_orig_table.q.out b/ql/src/test/results/clientpositive/llap/delete_orig_table.q.out
new file mode 100644
index 0000000..a036b06
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/delete_orig_table.q.out
@@ -0,0 +1,61 @@
+PREHOOK: query: create table acid_dot(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dot
+POSTHOOK: query: create table acid_dot(
+ ctinyint TINYINT,
+ csmallint SMALLINT,
+ cint INT,
+ cbigint BIGINT,
+ cfloat FLOAT,
+ cdouble DOUBLE,
+ cstring1 STRING,
+ cstring2 STRING,
+ ctimestamp1 TIMESTAMP,
+ ctimestamp2 TIMESTAMP,
+ cboolean1 BOOLEAN,
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dot
+PREHOOK: query: select count(*) from acid_dot
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dot
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from acid_dot
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dot
+#### A masked pattern was here ####
+12288
+PREHOOK: query: delete from acid_dot where cint < -1070551679
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dot
+PREHOOK: Output: default@acid_dot
+POSTHOOK: query: delete from acid_dot where cint < -1070551679
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dot
+POSTHOOK: Output: default@acid_dot
+PREHOOK: query: select count(*) from acid_dot
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dot
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from acid_dot
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dot
+#### A masked pattern was here ####
+12280
+#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/delete_tmp_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/delete_tmp_table.q.out b/ql/src/test/results/clientpositive/llap/delete_tmp_table.q.out
new file mode 100644
index 0000000..4dc7344
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/delete_tmp_table.q.out
@@ -0,0 +1,60 @@
+PREHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dtt
+POSTHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dtt
+PREHOOK: query: insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dtt
+POSTHOOK: query: insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dtt
+POSTHOOK: Lineage: acid_dtt.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dtt.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_dtt order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dtt
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_dtt order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dtt
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa
+-1073051226 A34p7oRr2WvUJNf
+-1072910839 0iqrc5
+-1072081801 dPkN74F7
+-1072076362 2uLyD28144vklju213J1mr
+-1071480828 aw724t8c5558x2xneC624
+-1071363017 Anj0oF
+-1070883071 0ruyd6Y50JpdGRf6HqD
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
+PREHOOK: query: delete from acid_dtt where b = '0ruyd6Y50JpdGRf6HqD' or b = '2uLyD28144vklju213J1mr'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dtt
+PREHOOK: Output: default@acid_dtt
+POSTHOOK: query: delete from acid_dtt where b = '0ruyd6Y50JpdGRf6HqD' or b = '2uLyD28144vklju213J1mr'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dtt
+POSTHOOK: Output: default@acid_dtt
+PREHOOK: query: select a,b from acid_dtt order by b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dtt
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from acid_dtt order by b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dtt
+#### A masked pattern was here ####
+-1072910839 0iqrc5
+-1073051226 A34p7oRr2WvUJNf
+-1071363017 Anj0oF
+-1071480828 aw724t8c5558x2xneC624
+-1072081801 dPkN74F7
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
+-1073279343 oj1YrV5Wa
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/delete_where_no_match.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/delete_where_no_match.q.out b/ql/src/test/results/clientpositive/llap/delete_where_no_match.q.out
new file mode 100644
index 0000000..cb2adc6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/delete_where_no_match.q.out
@@ -0,0 +1,62 @@
+PREHOOK: query: create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dwnm
+POSTHOOK: query: create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dwnm
+PREHOOK: query: insert into table acid_dwnm select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dwnm
+POSTHOOK: query: insert into table acid_dwnm select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dwnm
+POSTHOOK: Lineage: acid_dwnm.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dwnm.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_dwnm order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwnm
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_dwnm order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwnm
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa
+-1073051226 A34p7oRr2WvUJNf
+-1072910839 0iqrc5
+-1072081801 dPkN74F7
+-1072076362 2uLyD28144vklju213J1mr
+-1071480828 aw724t8c5558x2xneC624
+-1071363017 Anj0oF
+-1070883071 0ruyd6Y50JpdGRf6HqD
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
+PREHOOK: query: delete from acid_dwnm where b = 'nosuchvalue'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwnm
+PREHOOK: Output: default@acid_dwnm
+POSTHOOK: query: delete from acid_dwnm where b = 'nosuchvalue'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwnm
+POSTHOOK: Output: default@acid_dwnm
+PREHOOK: query: select a,b from acid_dwnm order by b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwnm
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from acid_dwnm order by b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwnm
+#### A masked pattern was here ####
+-1072910839 0iqrc5
+-1070883071 0ruyd6Y50JpdGRf6HqD
+-1072076362 2uLyD28144vklju213J1mr
+-1073051226 A34p7oRr2WvUJNf
+-1071363017 Anj0oF
+-1071480828 aw724t8c5558x2xneC624
+-1072081801 dPkN74F7
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
+-1073279343 oj1YrV5Wa
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/delete_where_non_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/delete_where_non_partitioned.q.out b/ql/src/test/results/clientpositive/llap/delete_where_non_partitioned.q.out
new file mode 100644
index 0000000..1bdb1e6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/delete_where_non_partitioned.q.out
@@ -0,0 +1,61 @@
+PREHOOK: query: create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dwnp
+POSTHOOK: query: create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dwnp
+PREHOOK: query: insert into table acid_dwnp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dwnp
+POSTHOOK: query: insert into table acid_dwnp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dwnp
+POSTHOOK: Lineage: acid_dwnp.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dwnp.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_dwnp order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwnp
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_dwnp order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwnp
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa
+-1073051226 A34p7oRr2WvUJNf
+-1072910839 0iqrc5
+-1072081801 dPkN74F7
+-1072076362 2uLyD28144vklju213J1mr
+-1071480828 aw724t8c5558x2xneC624
+-1071363017 Anj0oF
+-1070883071 0ruyd6Y50JpdGRf6HqD
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
+PREHOOK: query: delete from acid_dwnp where b = '0ruyd6Y50JpdGRf6HqD'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwnp
+PREHOOK: Output: default@acid_dwnp
+POSTHOOK: query: delete from acid_dwnp where b = '0ruyd6Y50JpdGRf6HqD'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwnp
+POSTHOOK: Output: default@acid_dwnp
+PREHOOK: query: select a,b from acid_dwnp order by b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwnp
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from acid_dwnp order by b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwnp
+#### A masked pattern was here ####
+-1072910839 0iqrc5
+-1072076362 2uLyD28144vklju213J1mr
+-1073051226 A34p7oRr2WvUJNf
+-1071363017 Anj0oF
+-1071480828 aw724t8c5558x2xneC624
+-1072081801 dPkN74F7
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
+-1073279343 oj1YrV5Wa
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/delete_where_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/delete_where_partitioned.q.out b/ql/src/test/results/clientpositive/llap/delete_where_partitioned.q.out
new file mode 100644
index 0000000..fc2e369
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/delete_where_partitioned.q.out
@@ -0,0 +1,105 @@
+PREHOOK: query: create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dwp
+POSTHOOK: query: create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dwp
+PREHOOK: query: insert into table acid_dwp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dwp@ds=today
+POSTHOOK: query: insert into table acid_dwp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dwp@ds=today
+POSTHOOK: Lineage: acid_dwp PARTITION(ds=today).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dwp PARTITION(ds=today).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: insert into table acid_dwp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > -10000000 order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dwp@ds=tomorrow
+POSTHOOK: query: insert into table acid_dwp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > -10000000 order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dwp@ds=tomorrow
+POSTHOOK: Lineage: acid_dwp PARTITION(ds=tomorrow).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dwp PARTITION(ds=tomorrow).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select a,b,ds from acid_dwp order by a, ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwp
+PREHOOK: Input: default@acid_dwp@ds=today
+PREHOOK: Input: default@acid_dwp@ds=tomorrow
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b,ds from acid_dwp order by a, ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwp
+POSTHOOK: Input: default@acid_dwp@ds=today
+POSTHOOK: Input: default@acid_dwp@ds=tomorrow
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa today
+-1073051226 A34p7oRr2WvUJNf today
+-1072910839 0iqrc5 today
+-1072081801 dPkN74F7 today
+-1072076362 2uLyD28144vklju213J1mr today
+-1071480828 aw724t8c5558x2xneC624 today
+-1071363017 Anj0oF today
+-1070883071 0ruyd6Y50JpdGRf6HqD today
+-1070551679 iUR3Q today
+-1069736047 k17Am8uPHWk02cEf1jet today
+-9676535 MmMPCF2 tomorrow
+-9462165 7WLVW6F4h71Dgk7 tomorrow
+-9329892 e7sC5M0H5K6EgSTf41X tomorrow
+-9175632 UUBET8444iJDvjUlq3en tomorrow
+-9011819 A6CX2HDWN8 tomorrow
+-8987676 FhXANp2KDtMmA2gFd778pA tomorrow
+-8413710 81Rg5rR0IaInWw tomorrow
+-8230445 K6J1LIb5 tomorrow
+-7980033 HtI02nss6t8S0fqH4vcLkCD tomorrow
+-6882225 r6gCtT4Tgo5rG tomorrow
+PREHOOK: query: delete from acid_dwp where a = '-1071363017'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwp
+PREHOOK: Input: default@acid_dwp@ds=today
+PREHOOK: Input: default@acid_dwp@ds=tomorrow
+PREHOOK: Output: default@acid_dwp@ds=today
+PREHOOK: Output: default@acid_dwp@ds=tomorrow
+POSTHOOK: query: delete from acid_dwp where a = '-1071363017'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwp
+POSTHOOK: Input: default@acid_dwp@ds=today
+POSTHOOK: Input: default@acid_dwp@ds=tomorrow
+POSTHOOK: Output: default@acid_dwp@ds=today
+POSTHOOK: Output: default@acid_dwp@ds=tomorrow
+PREHOOK: query: select * from acid_dwp order by a, ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwp
+PREHOOK: Input: default@acid_dwp@ds=today
+PREHOOK: Input: default@acid_dwp@ds=tomorrow
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_dwp order by a, ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwp
+POSTHOOK: Input: default@acid_dwp@ds=today
+POSTHOOK: Input: default@acid_dwp@ds=tomorrow
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa today
+-1073051226 A34p7oRr2WvUJNf today
+-1072910839 0iqrc5 today
+-1072081801 dPkN74F7 today
+-1072076362 2uLyD28144vklju213J1mr today
+-1071480828 aw724t8c5558x2xneC624 today
+-1070883071 0ruyd6Y50JpdGRf6HqD today
+-1070551679 iUR3Q today
+-1069736047 k17Am8uPHWk02cEf1jet today
+-9676535 MmMPCF2 tomorrow
+-9462165 7WLVW6F4h71Dgk7 tomorrow
+-9329892 e7sC5M0H5K6EgSTf41X tomorrow
+-9175632 UUBET8444iJDvjUlq3en tomorrow
+-9011819 A6CX2HDWN8 tomorrow
+-8987676 FhXANp2KDtMmA2gFd778pA tomorrow
+-8413710 81Rg5rR0IaInWw tomorrow
+-8230445 K6J1LIb5 tomorrow
+-7980033 HtI02nss6t8S0fqH4vcLkCD tomorrow
+-6882225 r6gCtT4Tgo5rG tomorrow
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/delete_whole_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/delete_whole_partition.q.out b/ql/src/test/results/clientpositive/llap/delete_whole_partition.q.out
new file mode 100644
index 0000000..043daf4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/delete_whole_partition.q.out
@@ -0,0 +1,92 @@
+PREHOOK: query: create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dwhp
+POSTHOOK: query: create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dwhp
+PREHOOK: query: insert into table acid_dwhp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dwhp@ds=today
+POSTHOOK: query: insert into table acid_dwhp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dwhp@ds=today
+POSTHOOK: Lineage: acid_dwhp PARTITION(ds=today).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dwhp PARTITION(ds=today).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: insert into table acid_dwhp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > -10000000 order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dwhp@ds=tomorrow
+POSTHOOK: query: insert into table acid_dwhp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > -10000000 order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dwhp@ds=tomorrow
+POSTHOOK: Lineage: acid_dwhp PARTITION(ds=tomorrow).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dwhp PARTITION(ds=tomorrow).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select a,b,ds from acid_dwhp order by a, ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwhp
+PREHOOK: Input: default@acid_dwhp@ds=today
+PREHOOK: Input: default@acid_dwhp@ds=tomorrow
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b,ds from acid_dwhp order by a, ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwhp
+POSTHOOK: Input: default@acid_dwhp@ds=today
+POSTHOOK: Input: default@acid_dwhp@ds=tomorrow
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa today
+-1073051226 A34p7oRr2WvUJNf today
+-1072910839 0iqrc5 today
+-1072081801 dPkN74F7 today
+-1072076362 2uLyD28144vklju213J1mr today
+-1071480828 aw724t8c5558x2xneC624 today
+-1071363017 Anj0oF today
+-1070883071 0ruyd6Y50JpdGRf6HqD today
+-1070551679 iUR3Q today
+-1069736047 k17Am8uPHWk02cEf1jet today
+-9676535 MmMPCF2 tomorrow
+-9462165 7WLVW6F4h71Dgk7 tomorrow
+-9329892 e7sC5M0H5K6EgSTf41X tomorrow
+-9175632 UUBET8444iJDvjUlq3en tomorrow
+-9011819 A6CX2HDWN8 tomorrow
+-8987676 FhXANp2KDtMmA2gFd778pA tomorrow
+-8413710 81Rg5rR0IaInWw tomorrow
+-8230445 K6J1LIb5 tomorrow
+-7980033 HtI02nss6t8S0fqH4vcLkCD tomorrow
+-6882225 r6gCtT4Tgo5rG tomorrow
+PREHOOK: query: delete from acid_dwhp where ds = 'today'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwhp
+PREHOOK: Input: default@acid_dwhp@ds=today
+PREHOOK: Output: default@acid_dwhp@ds=today
+POSTHOOK: query: delete from acid_dwhp where ds = 'today'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwhp
+POSTHOOK: Input: default@acid_dwhp@ds=today
+POSTHOOK: Output: default@acid_dwhp@ds=today
+PREHOOK: query: select * from acid_dwhp order by a, ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dwhp
+PREHOOK: Input: default@acid_dwhp@ds=today
+PREHOOK: Input: default@acid_dwhp@ds=tomorrow
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_dwhp order by a, ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dwhp
+POSTHOOK: Input: default@acid_dwhp@ds=today
+POSTHOOK: Input: default@acid_dwhp@ds=tomorrow
+#### A masked pattern was here ####
+-9676535 MmMPCF2 tomorrow
+-9462165 7WLVW6F4h71Dgk7 tomorrow
+-9329892 e7sC5M0H5K6EgSTf41X tomorrow
+-9175632 UUBET8444iJDvjUlq3en tomorrow
+-9011819 A6CX2HDWN8 tomorrow
+-8987676 FhXANp2KDtMmA2gFd778pA tomorrow
+-8413710 81Rg5rR0IaInWw tomorrow
+-8230445 K6J1LIb5 tomorrow
+-7980033 HtI02nss6t8S0fqH4vcLkCD tomorrow
+-6882225 r6gCtT4Tgo5rG tomorrow
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out b/ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out
new file mode 100644
index 0000000..6de3e3a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/disable_merge_for_bucketing.q.out
@@ -0,0 +1,502 @@
+PREHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket2_1
+POSTHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket2_1
+PREHOOK: query: explain extended
+insert overwrite table bucket2_1
+select * from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucket2_1
+select * from src
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_TABREF
+ TOK_TABNAME
+ src
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_TAB
+ TOK_TABNAME
+ bucket2_1
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_ALLCOLREF
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: string), _col1 (type: string)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: src
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src
+ name: default.src
+ Truncated Path -> Alias:
+ /src [src]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Select Operator
+ expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 1
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.bucket2_1
+ serialization.ddl struct bucket2_1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket2_1
+ TotalFiles: 2
+ GatherStats: true
+ MultiFileSpray: true
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types int:string
+#### A masked pattern was here ####
+ name default.bucket2_1
+ serialization.ddl struct bucket2_1 { i32 key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket2_1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucket2_1
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket2_1
+POSTHOOK: query: insert overwrite table bucket2_1
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket2_1
+POSTHOOK: Lineage: bucket2_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket2_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket2_1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket2_1
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+2 val_2
+4 val_4
+8 val_8
+10 val_10
+12 val_12
+12 val_12
+18 val_18
+18 val_18
+20 val_20
+24 val_24
+24 val_24
+26 val_26
+26 val_26
+28 val_28
+30 val_30
+34 val_34
+42 val_42
+42 val_42
+44 val_44
+54 val_54
+58 val_58
+58 val_58
+64 val_64
+66 val_66
+70 val_70
+70 val_70
+70 val_70
+72 val_72
+72 val_72
+74 val_74
+76 val_76
+76 val_76
+78 val_78
+80 val_80
+82 val_82
+84 val_84
+84 val_84
+86 val_86
+90 val_90
+90 val_90
+90 val_90
+92 val_92
+96 val_96
+98 val_98
+98 val_98
+100 val_100
+100 val_100
+104 val_104
+104 val_104
+114 val_114
+116 val_116
+118 val_118
+118 val_118
+120 val_120
+120 val_120
+126 val_126
+128 val_128
+128 val_128
+128 val_128
+134 val_134
+134 val_134
+136 val_136
+138 val_138
+138 val_138
+138 val_138
+138 val_138
+146 val_146
+146 val_146
+150 val_150
+152 val_152
+152 val_152
+156 val_156
+158 val_158
+160 val_160
+162 val_162
+164 val_164
+164 val_164
+166 val_166
+168 val_168
+170 val_170
+172 val_172
+172 val_172
+174 val_174
+174 val_174
+176 val_176
+176 val_176
+178 val_178
+180 val_180
+186 val_186
+190 val_190
+192 val_192
+194 val_194
+196 val_196
+200 val_200
+200 val_200
+202 val_202
+208 val_208
+208 val_208
+208 val_208
+214 val_214
+216 val_216
+216 val_216
+218 val_218
+222 val_222
+224 val_224
+224 val_224
+226 val_226
+228 val_228
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+230 val_230
+238 val_238
+238 val_238
+242 val_242
+242 val_242
+244 val_244
+248 val_248
+252 val_252
+256 val_256
+256 val_256
+258 val_258
+260 val_260
+262 val_262
+266 val_266
+272 val_272
+272 val_272
+274 val_274
+278 val_278
+278 val_278
+280 val_280
+280 val_280
+282 val_282
+282 val_282
+284 val_284
+286 val_286
+288 val_288
+288 val_288
+292 val_292
+296 val_296
+298 val_298
+298 val_298
+298 val_298
+302 val_302
+306 val_306
+308 val_308
+310 val_310
+316 val_316
+316 val_316
+316 val_316
+318 val_318
+318 val_318
+318 val_318
+322 val_322
+322 val_322
+332 val_332
+336 val_336
+338 val_338
+342 val_342
+342 val_342
+344 val_344
+344 val_344
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+348 val_348
+356 val_356
+360 val_360
+362 val_362
+364 val_364
+366 val_366
+368 val_368
+374 val_374
+378 val_378
+382 val_382
+382 val_382
+384 val_384
+384 val_384
+384 val_384
+386 val_386
+392 val_392
+394 val_394
+396 val_396
+396 val_396
+396 val_396
+400 val_400
+402 val_402
+404 val_404
+404 val_404
+406 val_406
+406 val_406
+406 val_406
+406 val_406
+414 val_414
+414 val_414
+418 val_418
+424 val_424
+424 val_424
+430 val_430
+430 val_430
+430 val_430
+432 val_432
+436 val_436
+438 val_438
+438 val_438
+438 val_438
+444 val_444
+446 val_446
+448 val_448
+452 val_452
+454 val_454
+454 val_454
+454 val_454
+458 val_458
+458 val_458
+460 val_460
+462 val_462
+462 val_462
+466 val_466
+466 val_466
+466 val_466
+468 val_468
+468 val_468
+468 val_468
+468 val_468
+470 val_470
+472 val_472
+478 val_478
+478 val_478
+480 val_480
+480 val_480
+480 val_480
+482 val_482
+484 val_484
+490 val_490
+492 val_492
+492 val_492
+494 val_494
+496 val_496
+498 val_498
+498 val_498
+498 val_498
[41/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out
new file mode 100644
index 0000000..7c51746
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out
@@ -0,0 +1,1202 @@
+PREHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [a]
+ /bucket_small/ds=2008-04-09 [a]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 1 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 0 Map 1
+ Position of Big Table: 1
+ Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [b]
+ /bucket_big/ds=2008-04-09 [b]
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+76
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ /bucket_big/ds=2008-04-09 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ /bucket_small/ds=2008-04-09 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+76
+PREHOOK: query: -- The mapjoin should fail resulting in the sort-merge join
+explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The mapjoin should fail resulting in the sort-merge join
+explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ /bucket_big/ds=2008-04-09 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ /bucket_small/ds=2008-04-09 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+76
[26/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out
new file mode 100644
index 0000000..34c2307
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out
@@ -0,0 +1,1844 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+drop table ss
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+drop table ss
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table ss_orc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table ss_orc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table ss_part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table ss_part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table ss_part_orc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table ss_part_orc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table ss (
+ss_sold_date_sk int,
+ss_net_paid_inc_tax float,
+ss_net_profit float)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ss
+POSTHOOK: query: create table ss (
+ss_sold_date_sk int,
+ss_net_paid_inc_tax float,
+ss_net_profit float)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ss
+PREHOOK: query: create table ss_part (
+ss_net_paid_inc_tax float,
+ss_net_profit float)
+partitioned by (ss_sold_date_sk int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ss_part
+POSTHOOK: query: create table ss_part (
+ss_net_paid_inc_tax float,
+ss_net_profit float)
+partitioned by (ss_sold_date_sk int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ss_part
+PREHOOK: query: load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@ss
+POSTHOOK: query: load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@ss
+PREHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: ss
+ Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean)
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: float), _col2 (type: float)
+ sort order: +++
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 (type: float)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: float), _col2 (type: float), _col0 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.ss_part
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ss_sold_date_sk
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.ss_part
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss
+PREHOOK: Output: default@ss_part
+POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss
+POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617
+POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part
+POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452617]
+Database: default
+Table: ss_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 11
+ rawDataSize 151
+ totalSize 162
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part
+PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part
+POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+10022.63 3952.8 2452617
+1765.07 -4648.8 2452617
+2.1 -2026.3 2452617
+2.99 -11.32 2452617
+3423.95 -3164.07 2452617
+5362.01 -600.28 2452617
+552.96 -1363.84 2452617
+565.92 196.48 2452617
+7412.83 2071.68 2452617
+85.8 25.61 2452617
+879.07 -2185.76 2452617
+PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part
+POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452638]
+Database: default
+Table: ss_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 13
+ rawDataSize 186
+ totalSize 199
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part
+PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part
+POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+0.15 -241.22 2452638
+10171.1 660.48 2452638
+1327.08 57.97 2452638
+1413.19 178.08 2452638
+150.39 -162.12 2452638
+1524.33 494.37 2452638
+156.67 -4626.56 2452638
+181.03 -207.24 2452638
+1971.35 -488.25 2452638
+267.01 -3266.36 2452638
+317.87 -3775.38 2452638
+4133.98 -775.72 2452638
+4329.49 -4000.51 2452638
+PREHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: ss
+ Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean)
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ss_net_paid_inc_tax (type: float), ss_net_profit (type: float), ss_sold_date_sk (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col2 (type: int)
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: int)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: float), VALUE._col1 (type: float), VALUE._col2 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.ss_part
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ss_sold_date_sk
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.ss_part
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss
+PREHOOK: Output: default@ss_part
+POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss
+POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617
+POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part
+POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452617]
+Database: default
+Table: ss_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 11
+ rawDataSize 151
+ totalSize 162
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part
+PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part
+POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+10022.63 3952.8 2452617
+1765.07 -4648.8 2452617
+2.1 -2026.3 2452617
+2.99 -11.32 2452617
+3423.95 -3164.07 2452617
+5362.01 -600.28 2452617
+552.96 -1363.84 2452617
+565.92 196.48 2452617
+7412.83 2071.68 2452617
+85.8 25.61 2452617
+879.07 -2185.76 2452617
+PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part
+POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452638]
+Database: default
+Table: ss_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 13
+ rawDataSize 186
+ totalSize 199
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part
+PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part
+POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+0.15 -241.22 2452638
+10171.1 660.48 2452638
+1327.08 57.97 2452638
+1413.19 178.08 2452638
+150.39 -162.12 2452638
+1524.33 494.37 2452638
+156.67 -4626.56 2452638
+181.03 -207.24 2452638
+1971.35 -488.25 2452638
+267.01 -3266.36 2452638
+317.87 -3775.38 2452638
+4133.98 -775.72 2452638
+4329.49 -4000.51 2452638
+PREHOOK: query: -- SORT DYNAMIC PARTITION DISABLED
+
+explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT DYNAMIC PARTITION DISABLED
+
+explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: ss
+ Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean)
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: float), _col2 (type: float)
+ sort order: +++
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 (type: float)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: float), _col2 (type: float), _col0 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.ss_part
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ss_sold_date_sk
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.ss_part
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss
+PREHOOK: Output: default@ss_part
+POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss
+POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617
+POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part
+POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452617]
+Database: default
+Table: ss_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 11
+ rawDataSize 151
+ totalSize 162
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part
+PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part
+POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+10022.63 3952.8 2452617
+1765.07 -4648.8 2452617
+2.1 -2026.3 2452617
+2.99 -11.32 2452617
+3423.95 -3164.07 2452617
+5362.01 -600.28 2452617
+552.96 -1363.84 2452617
+565.92 196.48 2452617
+7412.83 2071.68 2452617
+85.8 25.61 2452617
+879.07 -2185.76 2452617
+PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part
+POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452638]
+Database: default
+Table: ss_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 13
+ rawDataSize 186
+ totalSize 199
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part
+PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part
+POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+0.15 -241.22 2452638
+10171.1 660.48 2452638
+1327.08 57.97 2452638
+1413.19 178.08 2452638
+150.39 -162.12 2452638
+1524.33 494.37 2452638
+156.67 -4626.56 2452638
+181.03 -207.24 2452638
+1971.35 -488.25 2452638
+267.01 -3266.36 2452638
+317.87 -3775.38 2452638
+4133.98 -775.72 2452638
+4329.49 -4000.51 2452638
+PREHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: ss
+ Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean)
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ss_net_paid_inc_tax (type: float), ss_net_profit (type: float), ss_sold_date_sk (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Map-reduce partition columns: _col2 (type: int)
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: int)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: float), VALUE._col1 (type: float), VALUE._col2 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.ss_part
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ss_sold_date_sk
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.ss_part
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss
+PREHOOK: Output: default@ss_part
+POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss
+POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617
+POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part
+POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452617]
+Database: default
+Table: ss_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 11
+ rawDataSize 151
+ totalSize 162
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part
+PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part
+POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+10022.63 3952.8 2452617
+1765.07 -4648.8 2452617
+2.1 -2026.3 2452617
+2.99 -11.32 2452617
+3423.95 -3164.07 2452617
+5362.01 -600.28 2452617
+552.96 -1363.84 2452617
+565.92 196.48 2452617
+7412.83 2071.68 2452617
+85.8 25.61 2452617
+879.07 -2185.76 2452617
+PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part
+POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452638]
+Database: default
+Table: ss_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 13
+ rawDataSize 186
+ totalSize 199
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part
+PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part
+POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+0.15 -241.22 2452638
+10171.1 660.48 2452638
+1327.08 57.97 2452638
+1413.19 178.08 2452638
+150.39 -162.12 2452638
+1524.33 494.37 2452638
+156.67 -4626.56 2452638
+181.03 -207.24 2452638
+1971.35 -488.25 2452638
+267.01 -3266.36 2452638
+317.87 -3775.38 2452638
+4133.98 -775.72 2452638
+4329.49 -4000.51 2452638
+PREHOOK: query: -- VECTORIZATION IS ENABLED
+
+create table ss_orc (
+ss_sold_date_sk int,
+ss_net_paid_inc_tax float,
+ss_net_profit float) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ss_orc
+POSTHOOK: query: -- VECTORIZATION IS ENABLED
+
+create table ss_orc (
+ss_sold_date_sk int,
+ss_net_paid_inc_tax float,
+ss_net_profit float) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ss_orc
+PREHOOK: query: create table ss_part_orc (
+ss_net_paid_inc_tax float,
+ss_net_profit float)
+partitioned by (ss_sold_date_sk int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ss_part_orc
+POSTHOOK: query: create table ss_part_orc (
+ss_net_paid_inc_tax float,
+ss_net_profit float)
+partitioned by (ss_sold_date_sk int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ss_part_orc
+PREHOOK: query: insert overwrite table ss_orc select * from ss
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss
+PREHOOK: Output: default@ss_orc
+POSTHOOK: query: insert overwrite table ss_orc select * from ss
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss
+POSTHOOK: Output: default@ss_orc
+POSTHOOK: Lineage: ss_orc.ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_orc.ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_orc.ss_sold_date_sk SIMPLE [(ss)ss.FieldSchema(name:ss_sold_date_sk, type:int, comment:null), ]
+PREHOOK: query: drop table ss
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ss
+PREHOOK: Output: default@ss
+POSTHOOK: query: drop table ss
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ss
+POSTHOOK: Output: default@ss
+PREHOOK: query: drop table ss_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ss_part
+PREHOOK: Output: default@ss_part
+POSTHOOK: query: drop table ss_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ss_part
+POSTHOOK: Output: default@ss_part
+PREHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss_orc
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss_orc
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: ss_orc
+ Statistics: Num rows: 24 Data size: 288 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean)
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: float), _col2 (type: float)
+ sort order: +++
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 (type: float)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: float), _col2 (type: float), _col0 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.ss_part_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ss_sold_date_sk
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.ss_part_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss_orc
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_orc
+PREHOOK: Output: default@ss_part_orc
+POSTHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss_orc
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ group by ss_sold_date_sk,
+ ss_net_paid_inc_tax,
+ ss_net_profit
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_orc
+POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452617
+POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452638
+POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part_orc
+POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part_orc
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452617]
+Database: default
+Table: ss_part_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 11
+ rawDataSize 88
+ totalSize 433
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part_orc
+PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part_orc
+POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+10022.63 3952.8 2452617
+1765.07 -4648.8 2452617
+2.1 -2026.3 2452617
+2.99 -11.32 2452617
+3423.95 -3164.07 2452617
+5362.01 -600.28 2452617
+552.96 -1363.84 2452617
+565.92 196.48 2452617
+7412.83 2071.68 2452617
+85.8 25.61 2452617
+879.07 -2185.76 2452617
+PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part_orc
+POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part_orc
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452638]
+Database: default
+Table: ss_part_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 13
+ rawDataSize 104
+ totalSize 456
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part_orc
+PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part_orc
+POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+0.15 -241.22 2452638
+10171.1 660.48 2452638
+1327.08 57.97 2452638
+1413.19 178.08 2452638
+150.39 -162.12 2452638
+1524.33 494.37 2452638
+156.67 -4626.56 2452638
+181.03 -207.24 2452638
+1971.35 -488.25 2452638
+267.01 -3266.36 2452638
+317.87 -3775.38 2452638
+4133.98 -775.72 2452638
+4329.49 -4000.51 2452638
+PREHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss_orc
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss_orc
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: ss_orc
+ Statistics: Num rows: 24 Data size: 288 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean)
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ss_net_paid_inc_tax (type: float), ss_net_profit (type: float), ss_sold_date_sk (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Map-reduce partition columns: _col2 (type: int)
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: int)
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: float), VALUE._col1 (type: float), VALUE._col2 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.ss_part_orc
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ss_sold_date_sk
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.ss_part_orc
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss_orc
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_orc
+PREHOOK: Output: default@ss_part_orc
+POSTHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+ ss_net_profit,
+ ss_sold_date_sk
+ from ss_orc
+ where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+ distribute by ss_sold_date_sk
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_orc
+POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452617
+POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452638
+POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ]
+POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ]
+PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part_orc
+POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part_orc
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452617]
+Database: default
+Table: ss_part_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 11
+ rawDataSize 88
+ totalSize 433
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part_orc
+PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part_orc
+POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617
+#### A masked pattern was here ####
+10022.63 3952.8 2452617
+1765.07 -4648.8 2452617
+2.1 -2026.3 2452617
+2.99 -11.32 2452617
+3423.95 -3164.07 2452617
+5362.01 -600.28 2452617
+552.96 -1363.84 2452617
+565.92 196.48 2452617
+7412.83 2071.68 2452617
+85.8 25.61 2452617
+879.07 -2185.76 2452617
+PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@ss_part_orc
+POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@ss_part_orc
+# col_name data_type comment
+
+ss_net_paid_inc_tax float
+ss_net_profit float
+
+# Partition Information
+# col_name data_type comment
+
+ss_sold_date_sk int
+
+# Detailed Partition Information
+Partition Value: [2452638]
+Database: default
+Table: ss_part_orc
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 13
+ rawDataSize 104
+ totalSize 456
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ss_part_orc
+PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ss_part_orc
+POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638
+#### A masked pattern was here ####
+0.15 -241.22 2452638
+10171.1 660.48 2452638
+1327.08 57.97 2452638
+1413.19 178.08 2452638
+150.39 -162.12 2452638
+1524.33 494.37 2452638
+156.67 -4626.56 2452638
+181.03 -207.24 2452638
+1971.35 -488.25 2452638
+267.01 -3266.36 2452638
+317.87 -3775.38 2452638
+4133.98 -775.72 2452638
+4329.49 -4000.51 2452638
+PREHOOK: query: drop table ss_orc
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ss_orc
+PREHOOK: Output: default@ss_orc
+POSTHOOK: query: drop table ss_orc
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ss_orc
+POSTHOOK: Output: default@ss_orc
+PREHOOK: query: drop table ss_part_orc
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@ss_part_orc
+PREHOOK: Output: default@ss_part_orc
+POSTHOOK: query: drop table ss_part_orc
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@ss_part_orc
+POSTHOOK: Output: default@ss_part_orc
+PREHOOK: query: drop table if exists hive13_dp1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists hive13_dp1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists hive13_dp1 (
+ k1 int,
+ k2 int
+)
+PARTITIONED BY(`day` string)
+STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@hive13_dp1
+POSTHOOK: query: create table if not exists hive13_dp1 (
+ k1 int,
+ k2 int
+)
+PARTITIONED BY(`day` string)
+STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@hive13_dp1
+PREHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`)
+select
+ key k1,
+ count(value) k2,
+ "day" `day`
+from src
+group by "day", key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`)
+select
+ key k1,
+ count(value) k2,
+ "day" `day`
+from src
+group by "day", key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col2)
+ keys: 'day' (type: string), _col1 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col2 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string), KEY._col1 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.hive13_dp1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ day
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.hive13_dp1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table `hive13_dp1` partition(`day`)
+select
+ key k1,
+ count(value) k2,
+ "day" `day`
+from src
+group by "day", key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@hive13_dp1
+POSTHOOK: query: insert overwrite table `hive13_dp1` partition(`day`)
+select
+ key k1,
+ count(value) k2,
+ "day" `day`
+from src
+group by "day", key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@hive13_dp1@day=day
+POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from hive13_dp1 order by k1, k2 limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@hive13_dp1
+PREHOOK: Input: default@hive13_dp1@day=day
+#### A masked pattern was here ####
+POSTHOOK: query: select * from hive13_dp1 order by k1, k2 limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@hive13_dp1
+POSTHOOK: Input: default@hive13_dp1@day=day
+#### A masked pattern was here ####
+0 3 day
+2 1 day
+4 1 day
+5 3 day
+8 1 day
+PREHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`)
+select
+ key k1,
+ count(value) k2,
+ "day" `day`
+from src
+group by "day", key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`)
+select
+ key k1,
+ count(value) k2,
+ "day" `day`
+from src
+group by "day", key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col2)
+ keys: 'day' (type: string), _col1 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col2 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string), KEY._col1 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.hive13_dp1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ day
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.hive13_dp1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table `hive13_dp1` partition(`day`)
+select
+ key k1,
+ count(value) k2,
+ "day" `day`
+from src
+group by "day", key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@hive13_dp1
+POSTHOOK: query: insert overwrite table `hive13_dp1` partition(`day`)
+select
+ key k1,
+ count(value) k2,
+ "day" `day`
+from src
+group by "day", key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@hive13_dp1@day=day
+POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from hive13_dp1 order by k1, k2 limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@hive13_dp1
+PREHOOK: Input: default@hive13_dp1@day=day
+#### A masked pattern was here ####
+POSTHOOK: query: select * from hive13_dp1 order by k1, k2 limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@hive13_dp1
+POSTHOOK: Input: default@hive13_dp1@day=day
+#### A masked pattern was here ####
+0 3 day
+2 1 day
+4 1 day
+5 3 day
+8 1 day
+PREHOOK: query: drop table hive13_dp1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@hive13_dp1
+PREHOOK: Output: default@hive13_dp1
+POSTHOOK: query: drop table hive13_dp1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@hive13_dp1
+POSTHOOK: Output: default@hive13_dp1
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/enforce_order.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/enforce_order.q.out b/ql/src/test/results/clientpositive/llap/enforce_order.q.out
new file mode 100644
index 0000000..3e43088
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/enforce_order.q.out
@@ -0,0 +1,80 @@
+PREHOOK: query: drop table table_asc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table table_asc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table table_desc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table table_desc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table_asc
+POSTHOOK: query: create table table_asc(key string, value string) clustered by (key) sorted by (key ASC) into 1 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table_asc
+PREHOOK: query: create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@table_desc
+POSTHOOK: query: create table table_desc(key string, value string) clustered by (key) sorted by (key DESC) into 1 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@table_desc
+PREHOOK: query: insert overwrite table table_asc select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@table_asc
+POSTHOOK: query: insert overwrite table table_asc select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@table_asc
+POSTHOOK: Lineage: table_asc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_asc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table table_desc select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@table_desc
+POSTHOOK: query: insert overwrite table table_desc select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@table_desc
+POSTHOOK: Lineage: table_desc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_desc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from table_asc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@table_asc
+#### A masked pattern was here ####
+POSTHOOK: query: select * from table_asc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@table_asc
+#### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+10 val_10
+100 val_100
+100 val_100
+103 val_103
+103 val_103
+104 val_104
+104 val_104
+PREHOOK: query: select * from table_desc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@table_desc
+#### A masked pattern was here ####
+POSTHOOK: query: select * from table_desc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@table_desc
+#### A masked pattern was here ####
+98 val_98
+98 val_98
+97 val_97
+97 val_97
+96 val_96
+95 val_95
+95 val_95
+92 val_92
+90 val_90
+90 val_90
[16/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/load_dyn_part1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/load_dyn_part1.q.out b/ql/src/test/results/clientpositive/llap/load_dyn_part1.q.out
new file mode 100644
index 0000000..8488240
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/load_dyn_part1.q.out
@@ -0,0 +1,2215 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+show partitions srcpart
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@srcpart
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+show partitions srcpart
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@srcpart
+ds=2008-04-08/hr=11
+ds=2008-04-08/hr=12
+ds=2008-04-09/hr=11
+ds=2008-04-09/hr=12
+PREHOOK: query: create table if not exists nzhang_part1 like srcpart
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part1
+POSTHOOK: query: create table if not exists nzhang_part1 like srcpart
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part1
+PREHOOK: query: create table if not exists nzhang_part2 like srcpart
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part2
+POSTHOOK: query: create table if not exists nzhang_part2 like srcpart
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part2
+PREHOOK: query: describe extended nzhang_part1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part1
+POSTHOOK: query: describe extended nzhang_part1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part1
+key string default
+value string default
+ds string
+hr string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+#### A masked pattern was here ####
+PREHOOK: query: explain
+from srcpart
+insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from srcpart
+insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-2 is a root stage
+ Stage-3 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-3
+ Stage-4 depends on stages: Stage-0
+ Stage-1 depends on stages: Stage-3
+ Stage-5 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-2
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (ds <= '2008-04-08') (type: boolean)
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part1
+ Filter Operator
+ predicate: (ds > '2008-04-08') (type: boolean)
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), hr (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part2
+ Execution mode: llap
+
+ Stage: Stage-3
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds
+ hr
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part1
+
+ Stage: Stage-4
+ Stats-Aggr Operator
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ partition:
+ ds 2008-12-31
+ hr
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part2
+
+ Stage: Stage-5
+ Stats-Aggr Operator
+
+PREHOOK: query: from srcpart
+insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part1
+PREHOOK: Output: default@nzhang_part2@ds=2008-12-31
+POSTHOOK: query: from srcpart
+insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part1@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@nzhang_part1@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@nzhang_part2@ds=2008-12-31/hr=11
+POSTHOOK: Output: default@nzhang_part2@ds=2008-12-31/hr=12
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show partitions nzhang_part1
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@nzhang_part1
+POSTHOOK: query: show partitions nzhang_part1
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@nzhang_part1
+ds=2008-04-08/hr=11
+ds=2008-04-08/hr=12
+PREHOOK: query: show partitions nzhang_part2
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@nzhang_part2
+POSTHOOK: query: show partitions nzhang_part2
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@nzhang_part2
+ds=2008-12-31/hr=11
+ds=2008-12-31/hr=12
+PREHOOK: query: select * from nzhang_part1 where ds is not null and hr is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_part1
+PREHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=11
+PREHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_part1 where ds is not null and hr is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_part1
+POSTHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@nzhang_part1@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 12
+0 val_0 2008-04-08 12
+0 val_0 2008-04-08 12
+10 val_10 2008-04-08 11
+10 val_10 2008-04-08 12
+100 val_100 2008-04-08 11
+100 val_100 2008-04-08 11
+100 val_100 2008-04-08 12
+100 val_100 2008-04-08 12
+103 val_103 2008-04-08 11
+103 val_103 2008-04-08 11
+103 val_103 2008-04-08 12
+103 val_103 2008-04-08 12
+104 val_104 2008-04-08 11
+104 val_104 2008-04-08 11
+104 val_104 2008-04-08 12
+104 val_104 2008-04-08 12
+105 val_105 2008-04-08 11
+105 val_105 2008-04-08 12
+11 val_11 2008-04-08 11
+11 val_11 2008-04-08 12
+111 val_111 2008-04-08 11
+111 val_111 2008-04-08 12
+113 val_113 2008-04-08 11
+113 val_113 2008-04-08 11
+113 val_113 2008-04-08 12
+113 val_113 2008-04-08 12
+114 val_114 2008-04-08 11
+114 val_114 2008-04-08 12
+116 val_116 2008-04-08 11
+116 val_116 2008-04-08 12
+118 val_118 2008-04-08 11
+118 val_118 2008-04-08 11
+118 val_118 2008-04-08 12
+118 val_118 2008-04-08 12
+119 val_119 2008-04-08 11
+119 val_119 2008-04-08 11
+119 val_119 2008-04-08 11
+119 val_119 2008-04-08 12
+119 val_119 2008-04-08 12
+119 val_119 2008-04-08 12
+12 val_12 2008-04-08 11
+12 val_12 2008-04-08 11
+12 val_12 2008-04-08 12
+12 val_12 2008-04-08 12
+120 val_120 2008-04-08 11
+120 val_120 2008-04-08 11
+120 val_120 2008-04-08 12
+120 val_120 2008-04-08 12
+125 val_125 2008-04-08 11
+125 val_125 2008-04-08 11
+125 val_125 2008-04-08 12
+125 val_125 2008-04-08 12
+126 val_126 2008-04-08 11
+126 val_126 2008-04-08 12
+128 val_128 2008-04-08 11
+128 val_128 2008-04-08 11
+128 val_128 2008-04-08 11
+128 val_128 2008-04-08 12
+128 val_128 2008-04-08 12
+128 val_128 2008-04-08 12
+129 val_129 2008-04-08 11
+129 val_129 2008-04-08 11
+129 val_129 2008-04-08 12
+129 val_129 2008-04-08 12
+131 val_131 2008-04-08 11
+131 val_131 2008-04-08 12
+133 val_133 2008-04-08 11
+133 val_133 2008-04-08 12
+134 val_134 2008-04-08 11
+134 val_134 2008-04-08 11
+134 val_134 2008-04-08 12
+134 val_134 2008-04-08 12
+136 val_136 2008-04-08 11
+136 val_136 2008-04-08 12
+137 val_137 2008-04-08 11
+137 val_137 2008-04-08 11
+137 val_137 2008-04-08 12
+137 val_137 2008-04-08 12
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 12
+138 val_138 2008-04-08 12
+138 val_138 2008-04-08 12
+138 val_138 2008-04-08 12
+143 val_143 2008-04-08 11
+143 val_143 2008-04-08 12
+145 val_145 2008-04-08 11
+145 val_145 2008-04-08 12
+146 val_146 2008-04-08 11
+146 val_146 2008-04-08 11
+146 val_146 2008-04-08 12
+146 val_146 2008-04-08 12
+149 val_149 2008-04-08 11
+149 val_149 2008-04-08 11
+149 val_149 2008-04-08 12
+149 val_149 2008-04-08 12
+15 val_15 2008-04-08 11
+15 val_15 2008-04-08 11
+15 val_15 2008-04-08 12
+15 val_15 2008-04-08 12
+150 val_150 2008-04-08 11
+150 val_150 2008-04-08 12
+152 val_152 2008-04-08 11
+152 val_152 2008-04-08 11
+152 val_152 2008-04-08 12
+152 val_152 2008-04-08 12
+153 val_153 2008-04-08 11
+153 val_153 2008-04-08 12
+155 val_155 2008-04-08 11
+155 val_155 2008-04-08 12
+156 val_156 2008-04-08 11
+156 val_156 2008-04-08 12
+157 val_157 2008-04-08 11
+157 val_157 2008-04-08 12
+158 val_158 2008-04-08 11
+158 val_158 2008-04-08 12
+160 val_160 2008-04-08 11
+160 val_160 2008-04-08 12
+162 val_162 2008-04-08 11
+162 val_162 2008-04-08 12
+163 val_163 2008-04-08 11
+163 val_163 2008-04-08 12
+164 val_164 2008-04-08 11
+164 val_164 2008-04-08 11
+164 val_164 2008-04-08 12
+164 val_164 2008-04-08 12
+165 val_165 2008-04-08 11
+165 val_165 2008-04-08 11
+165 val_165 2008-04-08 12
+165 val_165 2008-04-08 12
+166 val_166 2008-04-08 11
+166 val_166 2008-04-08 12
+167 val_167 2008-04-08 11
+167 val_167 2008-04-08 11
+167 val_167 2008-04-08 11
+167 val_167 2008-04-08 12
+167 val_167 2008-04-08 12
+167 val_167 2008-04-08 12
+168 val_168 2008-04-08 11
+168 val_168 2008-04-08 12
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 12
+169 val_169 2008-04-08 12
+169 val_169 2008-04-08 12
+169 val_169 2008-04-08 12
+17 val_17 2008-04-08 11
+17 val_17 2008-04-08 12
+170 val_170 2008-04-08 11
+170 val_170 2008-04-08 12
+172 val_172 2008-04-08 11
+172 val_172 2008-04-08 11
+172 val_172 2008-04-08 12
+172 val_172 2008-04-08 12
+174 val_174 2008-04-08 11
+174 val_174 2008-04-08 11
+174 val_174 2008-04-08 12
+174 val_174 2008-04-08 12
+175 val_175 2008-04-08 11
+175 val_175 2008-04-08 11
+175 val_175 2008-04-08 12
+175 val_175 2008-04-08 12
+176 val_176 2008-04-08 11
+176 val_176 2008-04-08 11
+176 val_176 2008-04-08 12
+176 val_176 2008-04-08 12
+177 val_177 2008-04-08 11
+177 val_177 2008-04-08 12
+178 val_178 2008-04-08 11
+178 val_178 2008-04-08 12
+179 val_179 2008-04-08 11
+179 val_179 2008-04-08 11
+179 val_179 2008-04-08 12
+179 val_179 2008-04-08 12
+18 val_18 2008-04-08 11
+18 val_18 2008-04-08 11
+18 val_18 2008-04-08 12
+18 val_18 2008-04-08 12
+180 val_180 2008-04-08 11
+180 val_180 2008-04-08 12
+181 val_181 2008-04-08 11
+181 val_181 2008-04-08 12
+183 val_183 2008-04-08 11
+183 val_183 2008-04-08 12
+186 val_186 2008-04-08 11
+186 val_186 2008-04-08 12
+187 val_187 2008-04-08 11
+187 val_187 2008-04-08 11
+187 val_187 2008-04-08 11
+187 val_187 2008-04-08 12
+187 val_187 2008-04-08 12
+187 val_187 2008-04-08 12
+189 val_189 2008-04-08 11
+189 val_189 2008-04-08 12
+19 val_19 2008-04-08 11
+19 val_19 2008-04-08 12
+190 val_190 2008-04-08 11
+190 val_190 2008-04-08 12
+191 val_191 2008-04-08 11
+191 val_191 2008-04-08 11
+191 val_191 2008-04-08 12
+191 val_191 2008-04-08 12
+192 val_192 2008-04-08 11
+192 val_192 2008-04-08 12
+193 val_193 2008-04-08 11
+193 val_193 2008-04-08 11
+193 val_193 2008-04-08 11
+193 val_193 2008-04-08 12
+193 val_193 2008-04-08 12
+193 val_193 2008-04-08 12
+194 val_194 2008-04-08 11
+194 val_194 2008-04-08 12
+195 val_195 2008-04-08 11
+195 val_195 2008-04-08 11
+195 val_195 2008-04-08 12
+195 val_195 2008-04-08 12
+196 val_196 2008-04-08 11
+196 val_196 2008-04-08 12
+197 val_197 2008-04-08 11
+197 val_197 2008-04-08 11
+197 val_197 2008-04-08 12
+197 val_197 2008-04-08 12
+199 val_199 2008-04-08 11
+199 val_199 2008-04-08 11
+199 val_199 2008-04-08 11
+199 val_199 2008-04-08 12
+199 val_199 2008-04-08 12
+199 val_199 2008-04-08 12
+2 val_2 2008-04-08 11
+2 val_2 2008-04-08 12
+20 val_20 2008-04-08 11
+20 val_20 2008-04-08 12
+200 val_200 2008-04-08 11
+200 val_200 2008-04-08 11
+200 val_200 2008-04-08 12
+200 val_200 2008-04-08 12
+201 val_201 2008-04-08 11
+201 val_201 2008-04-08 12
+202 val_202 2008-04-08 11
+202 val_202 2008-04-08 12
+203 val_203 2008-04-08 11
+203 val_203 2008-04-08 11
+203 val_203 2008-04-08 12
+203 val_203 2008-04-08 12
+205 val_205 2008-04-08 11
+205 val_205 2008-04-08 11
+205 val_205 2008-04-08 12
+205 val_205 2008-04-08 12
+207 val_207 2008-04-08 11
+207 val_207 2008-04-08 11
+207 val_207 2008-04-08 12
+207 val_207 2008-04-08 12
+208 val_208 2008-04-08 11
+208 val_208 2008-04-08 11
+208 val_208 2008-04-08 11
+208 val_208 2008-04-08 12
+208 val_208 2008-04-08 12
+208 val_208 2008-04-08 12
+209 val_209 2008-04-08 11
+209 val_209 2008-04-08 11
+209 val_209 2008-04-08 12
+209 val_209 2008-04-08 12
+213 val_213 2008-04-08 11
+213 val_213 2008-04-08 11
+213 val_213 2008-04-08 12
+213 val_213 2008-04-08 12
+214 val_214 2008-04-08 11
+214 val_214 2008-04-08 12
+216 val_216 2008-04-08 11
+216 val_216 2008-04-08 11
+216 val_216 2008-04-08 12
+216 val_216 2008-04-08 12
+217 val_217 2008-04-08 11
+217 val_217 2008-04-08 11
+217 val_217 2008-04-08 12
+217 val_217 2008-04-08 12
+218 val_218 2008-04-08 11
+218 val_218 2008-04-08 12
+219 val_219 2008-04-08 11
+219 val_219 2008-04-08 11
+219 val_219 2008-04-08 12
+219 val_219 2008-04-08 12
+221 val_221 2008-04-08 11
+221 val_221 2008-04-08 11
+221 val_221 2008-04-08 12
+221 val_221 2008-04-08 12
+222 val_222 2008-04-08 11
+222 val_222 2008-04-08 12
+223 val_223 2008-04-08 11
+223 val_223 2008-04-08 11
+223 val_223 2008-04-08 12
+223 val_223 2008-04-08 12
+224 val_224 2008-04-08 11
+224 val_224 2008-04-08 11
+224 val_224 2008-04-08 12
+224 val_224 2008-04-08 12
+226 val_226 2008-04-08 11
+226 val_226 2008-04-08 12
+228 val_228 2008-04-08 11
+228 val_228 2008-04-08 12
+229 val_229 2008-04-08 11
+229 val_229 2008-04-08 11
+229 val_229 2008-04-08 12
+229 val_229 2008-04-08 12
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+233 val_233 2008-04-08 11
+233 val_233 2008-04-08 11
+233 val_233 2008-04-08 12
+233 val_233 2008-04-08 12
+235 val_235 2008-04-08 11
+235 val_235 2008-04-08 12
+237 val_237 2008-04-08 11
+237 val_237 2008-04-08 11
+237 val_237 2008-04-08 12
+237 val_237 2008-04-08 12
+238 val_238 2008-04-08 11
+238 val_238 2008-04-08 11
+238 val_238 2008-04-08 12
+238 val_238 2008-04-08 12
+239 val_239 2008-04-08 11
+239 val_239 2008-04-08 11
+239 val_239 2008-04-08 12
+239 val_239 2008-04-08 12
+24 val_24 2008-04-08 11
+24 val_24 2008-04-08 11
+24 val_24 2008-04-08 12
+24 val_24 2008-04-08 12
+241 val_241 2008-04-08 11
+241 val_241 2008-04-08 12
+242 val_242 2008-04-08 11
+242 val_242 2008-04-08 11
+242 val_242 2008-04-08 12
+242 val_242 2008-04-08 12
+244 val_244 2008-04-08 11
+244 val_244 2008-04-08 12
+247 val_247 2008-04-08 11
+247 val_247 2008-04-08 12
+248 val_248 2008-04-08 11
+248 val_248 2008-04-08 12
+249 val_249 2008-04-08 11
+249 val_249 2008-04-08 12
+252 val_252 2008-04-08 11
+252 val_252 2008-04-08 12
+255 val_255 2008-04-08 11
+255 val_255 2008-04-08 11
+255 val_255 2008-04-08 12
+255 val_255 2008-04-08 12
+256 val_256 2008-04-08 11
+256 val_256 2008-04-08 11
+256 val_256 2008-04-08 12
+256 val_256 2008-04-08 12
+257 val_257 2008-04-08 11
+257 val_257 2008-04-08 12
+258 val_258 2008-04-08 11
+258 val_258 2008-04-08 12
+26 val_26 2008-04-08 11
+26 val_26 2008-04-08 11
+26 val_26 2008-04-08 12
+26 val_26 2008-04-08 12
+260 val_260 2008-04-08 11
+260 val_260 2008-04-08 12
+262 val_262 2008-04-08 11
+262 val_262 2008-04-08 12
+263 val_263 2008-04-08 11
+263 val_263 2008-04-08 12
+265 val_265 2008-04-08 11
+265 val_265 2008-04-08 11
+265 val_265 2008-04-08 12
+265 val_265 2008-04-08 12
+266 val_266 2008-04-08 11
+266 val_266 2008-04-08 12
+27 val_27 2008-04-08 11
+27 val_27 2008-04-08 12
+272 val_272 2008-04-08 11
+272 val_272 2008-04-08 11
+272 val_272 2008-04-08 12
+272 val_272 2008-04-08 12
+273 val_273 2008-04-08 11
+273 val_273 2008-04-08 11
+273 val_273 2008-04-08 11
+273 val_273 2008-04-08 12
+273 val_273 2008-04-08 12
+273 val_273 2008-04-08 12
+274 val_274 2008-04-08 11
+274 val_274 2008-04-08 12
+275 val_275 2008-04-08 11
+275 val_275 2008-04-08 12
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 12
+277 val_277 2008-04-08 12
+277 val_277 2008-04-08 12
+277 val_277 2008-04-08 12
+278 val_278 2008-04-08 11
+278 val_278 2008-04-08 11
+278 val_278 2008-04-08 12
+278 val_278 2008-04-08 12
+28 val_28 2008-04-08 11
+28 val_28 2008-04-08 12
+280 val_280 2008-04-08 11
+280 val_280 2008-04-08 11
+280 val_280 2008-04-08 12
+280 val_280 2008-04-08 12
+281 val_281 2008-04-08 11
+281 val_281 2008-04-08 11
+281 val_281 2008-04-08 12
+281 val_281 2008-04-08 12
+282 val_282 2008-04-08 11
+282 val_282 2008-04-08 11
+282 val_282 2008-04-08 12
+282 val_282 2008-04-08 12
+283 val_283 2008-04-08 11
+283 val_283 2008-04-08 12
+284 val_284 2008-04-08 11
+284 val_284 2008-04-08 12
+285 val_285 2008-04-08 11
+285 val_285 2008-04-08 12
+286 val_286 2008-04-08 11
+286 val_286 2008-04-08 12
+287 val_287 2008-04-08 11
+287 val_287 2008-04-08 12
+288 val_288 2008-04-08 11
+288 val_288 2008-04-08 11
+288 val_288 2008-04-08 12
+288 val_288 2008-04-08 12
+289 val_289 2008-04-08 11
+289 val_289 2008-04-08 12
+291 val_291 2008-04-08 11
+291 val_291 2008-04-08 12
+292 val_292 2008-04-08 11
+292 val_292 2008-04-08 12
+296 val_296 2008-04-08 11
+296 val_296 2008-04-08 12
+298 val_298 2008-04-08 11
+298 val_298 2008-04-08 11
+298 val_298 2008-04-08 11
+298 val_298 2008-04-08 12
+298 val_298 2008-04-08 12
+298 val_298 2008-04-08 12
+30 val_30 2008-04-08 11
+30 val_30 2008-04-08 12
+302 val_302 2008-04-08 11
+302 val_302 2008-04-08 12
+305 val_305 2008-04-08 11
+305 val_305 2008-04-08 12
+306 val_306 2008-04-08 11
+306 val_306 2008-04-08 12
+307 val_307 2008-04-08 11
+307 val_307 2008-04-08 11
+307 val_307 2008-04-08 12
+307 val_307 2008-04-08 12
+308 val_308 2008-04-08 11
+308 val_308 2008-04-08 12
+309 val_309 2008-04-08 11
+309 val_309 2008-04-08 11
+309 val_309 2008-04-08 12
+309 val_309 2008-04-08 12
+310 val_310 2008-04-08 11
+310 val_310 2008-04-08 12
+311 val_311 2008-04-08 11
+311 val_311 2008-04-08 11
+311 val_311 2008-04-08 11
+311 val_311 2008-04-08 12
+311 val_311 2008-04-08 12
+311 val_311 2008-04-08 12
+315 val_315 2008-04-08 11
+315 val_315 2008-04-08 12
+316 val_316 2008-04-08 11
+316 val_316 2008-04-08 11
+316 val_316 2008-04-08 11
+316 val_316 2008-04-08 12
+316 val_316 2008-04-08 12
+316 val_316 2008-04-08 12
+317 val_317 2008-04-08 11
+317 val_317 2008-04-08 11
+317 val_317 2008-04-08 12
+317 val_317 2008-04-08 12
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 12
+318 val_318 2008-04-08 12
+318 val_318 2008-04-08 12
+321 val_321 2008-04-08 11
+321 val_321 2008-04-08 11
+321 val_321 2008-04-08 12
+321 val_321 2008-04-08 12
+322 val_322 2008-04-08 11
+322 val_322 2008-04-08 11
+322 val_322 2008-04-08 12
+322 val_322 2008-04-08 12
+323 val_323 2008-04-08 11
+323 val_323 2008-04-08 12
+325 val_325 2008-04-08 11
+325 val_325 2008-04-08 11
+325 val_325 2008-04-08 12
+325 val_325 2008-04-08 12
+327 val_327 2008-04-08 11
+327 val_327 2008-04-08 11
+327 val_327 2008-04-08 11
+327 val_327 2008-04-08 12
+327 val_327 2008-04-08 12
+327 val_327 2008-04-08 12
+33 val_33 2008-04-08 11
+33 val_33 2008-04-08 12
+331 val_331 2008-04-08 11
+331 val_331 2008-04-08 11
+331 val_331 2008-04-08 12
+331 val_331 2008-04-08 12
+332 val_332 2008-04-08 11
+332 val_332 2008-04-08 12
+333 val_333 2008-04-08 11
+333 val_333 2008-04-08 11
+333 val_333 2008-04-08 12
+333 val_333 2008-04-08 12
+335 val_335 2008-04-08 11
+335 val_335 2008-04-08 12
+336 val_336 2008-04-08 11
+336 val_336 2008-04-08 12
+338 val_338 2008-04-08 11
+338 val_338 2008-04-08 12
+339 val_339 2008-04-08 11
+339 val_339 2008-04-08 12
+34 val_34 2008-04-08 11
+34 val_34 2008-04-08 12
+341 val_341 2008-04-08 11
+341 val_341 2008-04-08 12
+342 val_342 2008-04-08 11
+342 val_342 2008-04-08 11
+342 val_342 2008-04-08 12
+342 val_342 2008-04-08 12
+344 val_344 2008-04-08 11
+344 val_344 2008-04-08 11
+344 val_344 2008-04-08 12
+344 val_344 2008-04-08 12
+345 val_345 2008-04-08 11
+345 val_345 2008-04-08 12
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+35 val_35 2008-04-08 11
+35 val_35 2008-04-08 11
+35 val_35 2008-04-08 11
+35 val_35 2008-04-08 12
+35 val_35 2008-04-08 12
+35 val_35 2008-04-08 12
+351 val_351 2008-04-08 11
+351 val_351 2008-04-08 12
+353 val_353 2008-04-08 11
+353 val_353 2008-04-08 11
+353 val_353 2008-04-08 12
+353 val_353 2008-04-08 12
+356 val_356 2008-04-08 11
+356 val_356 2008-04-08 12
+360 val_360 2008-04-08 11
+360 val_360 2008-04-08 12
+362 val_362 2008-04-08 11
+362 val_362 2008-04-08 12
+364 val_364 2008-04-08 11
+364 val_364 2008-04-08 12
+365 val_365 2008-04-08 11
+365 val_365 2008-04-08 12
+366 val_366 2008-04-08 11
+366 val_366 2008-04-08 12
+367 val_367 2008-04-08 11
+367 val_367 2008-04-08 11
+367 val_367 2008-04-08 12
+367 val_367 2008-04-08 12
+368 val_368 2008-04-08 11
+368 val_368 2008-04-08 12
+369 val_369 2008-04-08 11
+369 val_369 2008-04-08 11
+369 val_369 2008-04-08 11
+369 val_369 2008-04-08 12
+369 val_369 2008-04-08 12
+369 val_369 2008-04-08 12
+37 val_37 2008-04-08 11
+37 val_37 2008-04-08 11
+37 val_37 2008-04-08 12
+37 val_37 2008-04-08 12
+373 val_373 2008-04-08 11
+373 val_373 2008-04-08 12
+374 val_374 2008-04-08 11
+374 val_374 2008-04-08 12
+375 val_375 2008-04-08 11
+375 val_375 2008-04-08 12
+377 val_377 2008-04-08 11
+377 val_377 2008-04-08 12
+378 val_378 2008-04-08 11
+378 val_378 2008-04-08 12
+379 val_379 2008-04-08 11
+379 val_379 2008-04-08 12
+382 val_382 2008-04-08 11
+382 val_382 2008-04-08 11
+382 val_382 2008-04-08 12
+382 val_382 2008-04-08 12
+384 val_384 2008-04-08 11
+384 val_384 2008-04-08 11
+384 val_384 2008-04-08 11
+384 val_384 2008-04-08 12
+384 val_384 2008-04-08 12
+384 val_384 2008-04-08 12
+386 val_386 2008-04-08 11
+386 val_386 2008-04-08 12
+389 val_389 2008-04-08 11
+389 val_389 2008-04-08 12
+392 val_392 2008-04-08 11
+392 val_392 2008-04-08 12
+393 val_393 2008-04-08 11
+393 val_393 2008-04-08 12
+394 val_394 2008-04-08 11
+394 val_394 2008-04-08 12
+395 val_395 2008-04-08 11
+395 val_395 2008-04-08 11
+395 val_395 2008-04-08 12
+395 val_395 2008-04-08 12
+396 val_396 2008-04-08 11
+396 val_396 2008-04-08 11
+396 val_396 2008-04-08 11
+396 val_396 2008-04-08 12
+396 val_396 2008-04-08 12
+396 val_396 2008-04-08 12
+397 val_397 2008-04-08 11
+397 val_397 2008-04-08 11
+397 val_397 2008-04-08 12
+397 val_397 2008-04-08 12
+399 val_399 2008-04-08 11
+399 val_399 2008-04-08 11
+399 val_399 2008-04-08 12
+399 val_399 2008-04-08 12
+4 val_4 2008-04-08 11
+4 val_4 2008-04-08 12
+400 val_400 2008-04-08 11
+400 val_400 2008-04-08 12
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+402 val_402 2008-04-08 11
+402 val_402 2008-04-08 12
+403 val_403 2008-04-08 11
+403 val_403 2008-04-08 11
+403 val_403 2008-04-08 11
+403 val_403 2008-04-08 12
+403 val_403 2008-04-08 12
+403 val_403 2008-04-08 12
+404 val_404 2008-04-08 11
+404 val_404 2008-04-08 11
+404 val_404 2008-04-08 12
+404 val_404 2008-04-08 12
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 12
+406 val_406 2008-04-08 12
+406 val_406 2008-04-08 12
+406 val_406 2008-04-08 12
+407 val_407 2008-04-08 11
+407 val_407 2008-04-08 12
+409 val_409 2008-04-08 11
+409 val_409 2008-04-08 11
+409 val_409 2008-04-08 11
+409 val_409 2008-04-08 12
+409 val_409 2008-04-08 12
+409 val_409 2008-04-08 12
+41 val_41 2008-04-08 11
+41 val_41 2008-04-08 12
+411 val_411 2008-04-08 11
+411 val_411 2008-04-08 12
+413 val_413 2008-04-08 11
+413 val_413 2008-04-08 11
+413 val_413 2008-04-08 12
+413 val_413 2008-04-08 12
+414 val_414 2008-04-08 11
+414 val_414 2008-04-08 11
+414 val_414 2008-04-08 12
+414 val_414 2008-04-08 12
+417 val_417 2008-04-08 11
+417 val_417 2008-04-08 11
+417 val_417 2008-04-08 11
+417 val_417 2008-04-08 12
+417 val_417 2008-04-08 12
+417 val_417 2008-04-08 12
+418 val_418 2008-04-08 11
+418 val_418 2008-04-08 12
+419 val_419 2008-04-08 11
+419 val_419 2008-04-08 12
+42 val_42 2008-04-08 11
+42 val_42 2008-04-08 11
+42 val_42 2008-04-08 12
+42 val_42 2008-04-08 12
+421 val_421 2008-04-08 11
+421 val_421 2008-04-08 12
+424 val_424 2008-04-08 11
+424 val_424 2008-04-08 11
+424 val_424 2008-04-08 12
+424 val_424 2008-04-08 12
+427 val_427 2008-04-08 11
+427 val_427 2008-04-08 12
+429 val_429 2008-04-08 11
+429 val_429 2008-04-08 11
+429 val_429 2008-04-08 12
+429 val_429 2008-04-08 12
+43 val_43 2008-04-08 11
+43 val_43 2008-04-08 12
+430 val_430 2008-04-08 11
+430 val_430 2008-04-08 11
+430 val_430 2008-04-08 11
+430 val_430 2008-04-08 12
+430 val_430 2008-04-08 12
+430 val_430 2008-04-08 12
+431 val_431 2008-04-08 11
+431 val_431 2008-04-08 11
+431 val_431 2008-04-08 11
+431 val_431 2008-04-08 12
+431 val_431 2008-04-08 12
+431 val_431 2008-04-08 12
+432 val_432 2008-04-08 11
+432 val_432 2008-04-08 12
+435 val_435 2008-04-08 11
+435 val_435 2008-04-08 12
+436 val_436 2008-04-08 11
+436 val_436 2008-04-08 12
+437 val_437 2008-04-08 11
+437 val_437 2008-04-08 12
+438 val_438 2008-04-08 11
+438 val_438 2008-04-08 11
+438 val_438 2008-04-08 11
+438 val_438 2008-04-08 12
+438 val_438 2008-04-08 12
+438 val_438 2008-04-08 12
+439 val_439 2008-04-08 11
+439 val_439 2008-04-08 11
+439 val_439 2008-04-08 12
+439 val_439 2008-04-08 12
+44 val_44 2008-04-08 11
+44 val_44 2008-04-08 12
+443 val_443 2008-04-08 11
+443 val_443 2008-04-08 12
+444 val_444 2008-04-08 11
+444 val_444 2008-04-08 12
+446 val_446 2008-04-08 11
+446 val_446 2008-04-08 12
+448 val_448 2008-04-08 11
+448 val_448 2008-04-08 12
+449 val_449 2008-04-08 11
+449 val_449 2008-04-08 12
+452 val_452 2008-04-08 11
+452 val_452 2008-04-08 12
+453 val_453 2008-04-08 11
+453 val_453 2008-04-08 12
+454 val_454 2008-04-08 11
+454 val_454 2008-04-08 11
+454 val_454 2008-04-08 11
+454 val_454 2008-04-08 12
+454 val_454 2008-04-08 12
+454 val_454 2008-04-08 12
+455 val_455 2008-04-08 11
+455 val_455 2008-04-08 12
+457 val_457 2008-04-08 11
+457 val_457 2008-04-08 12
+458 val_458 2008-04-08 11
+458 val_458 2008-04-08 11
+458 val_458 2008-04-08 12
+458 val_458 2008-04-08 12
+459 val_459 2008-04-08 11
+459 val_459 2008-04-08 11
+459 val_459 2008-04-08 12
+459 val_459 2008-04-08 12
+460 val_460 2008-04-08 11
+460 val_460 2008-04-08 12
+462 val_462 2008-04-08 11
+462 val_462 2008-04-08 11
+462 val_462 2008-04-08 12
+462 val_462 2008-04-08 12
+463 val_463 2008-04-08 11
+463 val_463 2008-04-08 11
+463 val_463 2008-04-08 12
+463 val_463 2008-04-08 12
+466 val_466 2008-04-08 11
+466 val_466 2008-04-08 11
+466 val_466 2008-04-08 11
+466 val_466 2008-04-08 12
+466 val_466 2008-04-08 12
+466 val_466 2008-04-08 12
+467 val_467 2008-04-08 11
+467 val_467 2008-04-08 12
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 12
+468 val_468 2008-04-08 12
+468 val_468 2008-04-08 12
+468 val_468 2008-04-08 12
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+47 val_47 2008-04-08 11
+47 val_47 2008-04-08 12
+470 val_470 2008-04-08 11
+470 val_470 2008-04-08 12
+472 val_472 2008-04-08 11
+472 val_472 2008-04-08 12
+475 val_475 2008-04-08 11
+475 val_475 2008-04-08 12
+477 val_477 2008-04-08 11
+477 val_477 2008-04-08 12
+478 val_478 2008-04-08 11
+478 val_478 2008-04-08 11
+478 val_478 2008-04-08 12
+478 val_478 2008-04-08 12
+479 val_479 2008-04-08 11
+479 val_479 2008-04-08 12
+480 val_480 2008-04-08 11
+480 val_480 2008-04-08 11
+480 val_480 2008-04-08 11
+480 val_480 2008-04-08 12
+480 val_480 2008-04-08 12
+480 val_480 2008-04-08 12
+481 val_481 2008-04-08 11
+481 val_481 2008-04-08 12
+482 val_482 2008-04-08 11
+482 val_482 2008-04-08 12
+483 val_483 2008-04-08 11
+483 val_483 2008-04-08 12
+484 val_484 2008-04-08 11
+484 val_484 2008-04-08 12
+485 val_485 2008-04-08 11
+485 val_485 2008-04-08 12
+487 val_487 2008-04-08 11
+487 val_487 2008-04-08 12
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 12
+489 val_489 2008-04-08 12
+489 val_489 2008-04-08 12
+489 val_489 2008-04-08 12
+490 val_490 2008-04-08 11
+490 val_490 2008-04-08 12
+491 val_491 2008-04-08 11
+491 val_491 2008-04-08 12
+492 val_492 2008-04-08 11
+492 val_492 2008-04-08 11
+492 val_492 2008-04-08 12
+492 val_492 2008-04-08 12
+493 val_493 2008-04-08 11
+493 val_493 2008-04-08 12
+494 val_494 2008-04-08 11
+494 val_494 2008-04-08 12
+495 val_495 2008-04-08 11
+495 val_495 2008-04-08 12
+496 val_496 2008-04-08 11
+496 val_496 2008-04-08 12
+497 val_497 2008-04-08 11
+497 val_497 2008-04-08 12
+498 val_498 2008-04-08 11
+498 val_498 2008-04-08 11
+498 val_498 2008-04-08 11
+498 val_498 2008-04-08 12
+498 val_498 2008-04-08 12
+498 val_498 2008-04-08 12
+5 val_5 2008-04-08 11
+5 val_5 2008-04-08 11
+5 val_5 2008-04-08 11
+5 val_5 2008-04-08 12
+5 val_5 2008-04-08 12
+5 val_5 2008-04-08 12
+51 val_51 2008-04-08 11
+51 val_51 2008-04-08 11
+51 val_51 2008-04-08 12
+51 val_51 2008-04-08 12
+53 val_53 2008-04-08 11
+53 val_53 2008-04-08 12
+54 val_54 2008-04-08 11
+54 val_54 2008-04-08 12
+57 val_57 2008-04-08 11
+57 val_57 2008-04-08 12
+58 val_58 2008-04-08 11
+58 val_58 2008-04-08 11
+58 val_58 2008-04-08 12
+58 val_58 2008-04-08 12
+64 val_64 2008-04-08 11
+64 val_64 2008-04-08 12
+65 val_65 2008-04-08 11
+65 val_65 2008-04-08 12
+66 val_66 2008-04-08 11
+66 val_66 2008-04-08 12
+67 val_67 2008-04-08 11
+67 val_67 2008-04-08 11
+67 val_67 2008-04-08 12
+67 val_67 2008-04-08 12
+69 val_69 2008-04-08 11
+69 val_69 2008-04-08 12
+70 val_70 2008-04-08 11
+70 val_70 2008-04-08 11
+70 val_70 2008-04-08 11
+70 val_70 2008-04-08 12
+70 val_70 2008-04-08 12
+70 val_70 2008-04-08 12
+72 val_72 2008-04-08 11
+72 val_72 2008-04-08 11
+72 val_72 2008-04-08 12
+72 val_72 2008-04-08 12
+74 val_74 2008-04-08 11
+74 val_74 2008-04-08 12
+76 val_76 2008-04-08 11
+76 val_76 2008-04-08 11
+76 val_76 2008-04-08 12
+76 val_76 2008-04-08 12
+77 val_77 2008-04-08 11
+77 val_77 2008-04-08 12
+78 val_78 2008-04-08 11
+78 val_78 2008-04-08 12
+8 val_8 2008-04-08 11
+8 val_8 2008-04-08 12
+80 val_80 2008-04-08 11
+80 val_80 2008-04-08 12
+82 val_82 2008-04-08 11
+82 val_82 2008-04-08 12
+83 val_83 2008-04-08 11
+83 val_83 2008-04-08 11
+83 val_83 2008-04-08 12
+83 val_83 2008-04-08 12
+84 val_84 2008-04-08 11
+84 val_84 2008-04-08 11
+84 val_84 2008-04-08 12
+84 val_84 2008-04-08 12
+85 val_85 2008-04-08 11
+85 val_85 2008-04-08 12
+86 val_86 2008-04-08 11
+86 val_86 2008-04-08 12
+87 val_87 2008-04-08 11
+87 val_87 2008-04-08 12
+9 val_9 2008-04-08 11
+9 val_9 2008-04-08 12
+90 val_90 2008-04-08 11
+90 val_90 2008-04-08 11
+90 val_90 2008-04-08 11
+90 val_90 2008-04-08 12
+90 val_90 2008-04-08 12
+90 val_90 2008-04-08 12
+92 val_92 2008-04-08 11
+92 val_92 2008-04-08 12
+95 val_95 2008-04-08 11
+95 val_95 2008-04-08 11
+95 val_95 2008-04-08 12
+95 val_95 2008-04-08 12
+96 val_96 2008-04-08 11
+96 val_96 2008-04-08 12
+97 val_97 2008-04-08 11
+97 val_97 2008-04-08 11
+97 val_97 2008-04-08 12
+97 val_97 2008-04-08 12
+98 val_98 2008-04-08 11
+98 val_98 2008-04-08 11
+98 val_98 2008-04-08 12
+98 val_98 2008-04-08 12
+PREHOOK: query: select * from nzhang_part2 where ds is not null and hr is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_part2
+PREHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=11
+PREHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_part2 where ds is not null and hr is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_part2
+POSTHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=11
+POSTHOOK: Input: default@nzhang_part2@ds=2008-12-31/hr=12
+#### A masked pattern was here ####
+0 val_0 2008-12-31 11
+0 val_0 2008-12-31 11
+0 val_0 2008-12-31 11
+0 val_0 2008-12-31 12
+0 val_0 2008-12-31 12
+0 val_0 2008-12-31 12
+10 val_10 2008-12-31 11
+10 val_10 2008-12-31 12
+100 val_100 2008-12-31 11
+100 val_100 2008-12-31 11
+100 val_100 2008-12-31 12
+100 val_100 2008-12-31 12
+103 val_103 2008-12-31 11
+103 val_103 2008-12-31 11
+103 val_103 2008-12-31 12
+103 val_103 2008-12-31 12
+104 val_104 2008-12-31 11
+104 val_104 2008-12-31 11
+104 val_104 2008-12-31 12
+104 val_104 2008-12-31 12
+105 val_105 2008-12-31 11
+105 val_105 2008-12-31 12
+11 val_11 2008-12-31 11
+11 val_11 2008-12-31 12
+111 val_111 2008-12-31 11
+111 val_111 2008-12-31 12
+113 val_113 2008-12-31 11
+113 val_113 2008-12-31 11
+113 val_113 2008-12-31 12
+113 val_113 2008-12-31 12
+114 val_114 2008-12-31 11
+114 val_114 2008-12-31 12
+116 val_116 2008-12-31 11
+116 val_116 2008-12-31 12
+118 val_118 2008-12-31 11
+118 val_118 2008-12-31 11
+118 val_118 2008-12-31 12
+118 val_118 2008-12-31 12
+119 val_119 2008-12-31 11
+119 val_119 2008-12-31 11
+119 val_119 2008-12-31 11
+119 val_119 2008-12-31 12
+119 val_119 2008-12-31 12
+119 val_119 2008-12-31 12
+12 val_12 2008-12-31 11
+12 val_12 2008-12-31 11
+12 val_12 2008-12-31 12
+12 val_12 2008-12-31 12
+120 val_120 2008-12-31 11
+120 val_120 2008-12-31 11
+120 val_120 2008-12-31 12
+120 val_120 2008-12-31 12
+125 val_125 2008-12-31 11
+125 val_125 2008-12-31 11
+125 val_125 2008-12-31 12
+125 val_125 2008-12-31 12
+126 val_126 2008-12-31 11
+126 val_126 2008-12-31 12
+128 val_128 2008-12-31 11
+128 val_128 2008-12-31 11
+128 val_128 2008-12-31 11
+128 val_128 2008-12-31 12
+128 val_128 2008-12-31 12
+128 val_128 2008-12-31 12
+129 val_129 2008-12-31 11
+129 val_129 2008-12-31 11
+129 val_129 2008-12-31 12
+129 val_129 2008-12-31 12
+131 val_131 2008-12-31 11
+131 val_131 2008-12-31 12
+133 val_133 2008-12-31 11
+133 val_133 2008-12-31 12
+134 val_134 2008-12-31 11
+134 val_134 2008-12-31 11
+134 val_134 2008-12-31 12
+134 val_134 2008-12-31 12
+136 val_136 2008-12-31 11
+136 val_136 2008-12-31 12
+137 val_137 2008-12-31 11
+137 val_137 2008-12-31 11
+137 val_137 2008-12-31 12
+137 val_137 2008-12-31 12
+138 val_138 2008-12-31 11
+138 val_138 2008-12-31 11
+138 val_138 2008-12-31 11
+138 val_138 2008-12-31 11
+138 val_138 2008-12-31 12
+138 val_138 2008-12-31 12
+138 val_138 2008-12-31 12
+138 val_138 2008-12-31 12
+143 val_143 2008-12-31 11
+143 val_143 2008-12-31 12
+145 val_145 2008-12-31 11
+145 val_145 2008-12-31 12
+146 val_146 2008-12-31 11
+146 val_146 2008-12-31 11
+146 val_146 2008-12-31 12
+146 val_146 2008-12-31 12
+149 val_149 2008-12-31 11
+149 val_149 2008-12-31 11
+149 val_149 2008-12-31 12
+149 val_149 2008-12-31 12
+15 val_15 2008-12-31 11
+15 val_15 2008-12-31 11
+15 val_15 2008-12-31 12
+15 val_15 2008-12-31 12
+150 val_150 2008-12-31 11
+150 val_150 2008-12-31 12
+152 val_152 2008-12-31 11
+152 val_152 2008-12-31 11
+152 val_152 2008-12-31 12
+152 val_152 2008-12-31 12
+153 val_153 2008-12-31 11
+153 val_153 2008-12-31 12
+155 val_155 2008-12-31 11
+155 val_155 2008-12-31 12
+156 val_156 2008-12-31 11
+156 val_156 2008-12-31 12
+157 val_157 2008-12-31 11
+157 val_157 2008-12-31 12
+158 val_158 2008-12-31 11
+158 val_158 2008-12-31 12
+160 val_160 2008-12-31 11
+160 val_160 2008-12-31 12
+162 val_162 2008-12-31 11
+162 val_162 2008-12-31 12
+163 val_163 2008-12-31 11
+163 val_163 2008-12-31 12
+164 val_164 2008-12-31 11
+164 val_164 2008-12-31 11
+164 val_164 2008-12-31 12
+164 val_164 2008-12-31 12
+165 val_165 2008-12-31 11
+165 val_165 2008-12-31 11
+165 val_165 2008-12-31 12
+165 val_165 2008-12-31 12
+166 val_166 2008-12-31 11
+166 val_166 2008-12-31 12
+167 val_167 2008-12-31 11
+167 val_167 2008-12-31 11
+167 val_167 2008-12-31 11
+167 val_167 2008-12-31 12
+167 val_167 2008-12-31 12
+167 val_167 2008-12-31 12
+168 val_168 2008-12-31 11
+168 val_168 2008-12-31 12
+169 val_169 2008-12-31 11
+169 val_169 2008-12-31 11
+169 val_169 2008-12-31 11
+169 val_169 2008-12-31 11
+169 val_169 2008-12-31 12
+169 val_169 2008-12-31 12
+169 val_169 2008-12-31 12
+169 val_169 2008-12-31 12
+17 val_17 2008-12-31 11
+17 val_17 2008-12-31 12
+170 val_170 2008-12-31 11
+170 val_170 2008-12-31 12
+172 val_172 2008-12-31 11
+172 val_172 2008-12-31 11
+172 val_172 2008-12-31 12
+172 val_172 2008-12-31 12
+174 val_174 2008-12-31 11
+174 val_174 2008-12-31 11
+174 val_174 2008-12-31 12
+174 val_174 2008-12-31 12
+175 val_175 2008-12-31 11
+175 val_175 2008-12-31 11
+175 val_175 2008-12-31 12
+175 val_175 2008-12-31 12
+176 val_176 2008-12-31 11
+176 val_176 2008-12-31 11
+176 val_176 2008-12-31 12
+176 val_176 2008-12-31 12
+177 val_177 2008-12-31 11
+177 val_177 2008-12-31 12
+178 val_178 2008-12-31 11
+178 val_178 2008-12-31 12
+179 val_179 2008-12-31 11
+179 val_179 2008-12-31 11
+179 val_179 2008-12-31 12
+179 val_179 2008-12-31 12
+18 val_18 2008-12-31 11
+18 val_18 2008-12-31 11
+18 val_18 2008-12-31 12
+18 val_18 2008-12-31 12
+180 val_180 2008-12-31 11
+180 val_180 2008-12-31 12
+181 val_181 2008-12-31 11
+181 val_181 2008-12-31 12
+183 val_183 2008-12-31 11
+183 val_183 2008-12-31 12
+186 val_186 2008-12-31 11
+186 val_186 2008-12-31 12
+187 val_187 2008-12-31 11
+187 val_187 2008-12-31 11
+187 val_187 2008-12-31 11
+187 val_187 2008-12-31 12
+187 val_187 2008-12-31 12
+187 val_187 2008-12-31 12
+189 val_189 2008-12-31 11
+189 val_189 2008-12-31 12
+19 val_19 2008-12-31 11
+19 val_19 2008-12-31 12
+190 val_190 2008-12-31 11
+190 val_190 2008-12-31 12
+191 val_191 2008-12-31 11
+191 val_191 2008-12-31 11
+191 val_191 2008-12-31 12
+191 val_191 2008-12-31 12
+192 val_192 2008-12-31 11
+192 val_192 2008-12-31 12
+193 val_193 2008-12-31 11
+193 val_193 2008-12-31 11
+193 val_193 2008-12-31 11
+193 val_193 2008-12-31 12
+193 val_193 2008-12-31 12
+193 val_193 2008-12-31 12
+194 val_194 2008-12-31 11
+194 val_194 2008-12-31 12
+195 val_195 2008-12-31 11
+195 val_195 2008-12-31 11
+195 val_195 2008-12-31 12
+195 val_195 2008-12-31 12
+196 val_196 2008-12-31 11
+196 val_196 2008-12-31 12
+197 val_197 2008-12-31 11
+197 val_197 2008-12-31 11
+197 val_197 2008-12-31 12
+197 val_197 2008-12-31 12
+199 val_199 2008-12-31 11
+199 val_199 2008-12-31 11
+199 val_199 2008-12-31 11
+199 val_199 2008-12-31 12
+199 val_199 2008-12-31 12
+199 val_199 2008-12-31 12
+2 val_2 2008-12-31 11
+2 val_2 2008-12-31 12
+20 val_20 2008-12-31 11
+20 val_20 2008-12-31 12
+200 val_200 2008-12-31 11
+200 val_200 2008-12-31 11
+200 val_200 2008-12-31 12
+200 val_200 2008-12-31 12
+201 val_201 2008-12-31 11
+201 val_201 2008-12-31 12
+202 val_202 2008-12-31 11
+202 val_202 2008-12-31 12
+203 val_203 2008-12-31 11
+203 val_203 2008-12-31 11
+203 val_203 2008-12-31 12
+203 val_203 2008-12-31 12
+205 val_205 2008-12-31 11
+205 val_205 2008-12-31 11
+205 val_205 2008-12-31 12
+205 val_205 2008-12-31 12
+207 val_207 2008-12-31 11
+207 val_207 2008-12-31 11
+207 val_207 2008-12-31 12
+207 val_207 2008-12-31 12
+208 val_208 2008-12-31 11
+208 val_208 2008-12-31 11
+208 val_208 2008-12-31 11
+208 val_208 2008-12-31 12
+208 val_208 2008-12-31 12
+208 val_208 2008-12-31 12
+209 val_209 2008-12-31 11
+209 val_209 2008-12-31 11
+209 val_209 2008-12-31 12
+209 val_209 2008-12-31 12
+213 val_213 2008-12-31 11
+213 val_213 2008-12-31 11
+213 val_213 2008-12-31 12
+213 val_213 2008-12-31 12
+214 val_214 2008-12-31 11
+214 val_214 2008-12-31 12
+216 val_216 2008-12-31 11
+216 val_216 2008-12-31 11
+216 val_216 2008-12-31 12
+216 val_216 2008-12-31 12
+217 val_217 2008-12-31 11
+217 val_217 2008-12-31 11
+217 val_217 2008-12-31 12
+217 val_217 2008-12-31 12
+218 val_218 2008-12-31 11
+218 val_218 2008-12-31 12
+219 val_219 2008-12-31 11
+219 val_219 2008-12-31 11
+219 val_219 2008-12-31 12
+219 val_219 2008-12-31 12
+221 val_221 2008-12-31 11
+221 val_221 2008-12-31 11
+221 val_221 2008-12-31 12
+221 val_221 2008-12-31 12
+222 val_222 2008-12-31 11
+222 val_222 2008-12-31 12
+223 val_223 2008-12-31 11
+223 val_223 2008-12-31 11
+223 val_223 2008-12-31 12
+223 val_223 2008-12-31 12
+224 val_224 2008-12-31 11
+224 val_224 2008-12-31 11
+224 val_224 2008-12-31 12
+224 val_224 2008-12-31 12
+226 val_226 2008-12-31 11
+226 val_226 2008-12-31 12
+228 val_228 2008-12-31 11
+228 val_228 2008-12-31 12
+229 val_229 2008-12-31 11
+229 val_229 2008-12-31 11
+229 val_229 2008-12-31 12
+229 val_229 2008-12-31 12
+230 val_230 2008-12-31 11
+230 val_230 2008-12-31 11
+230 val_230 2008-12-31 11
+230 val_230 2008-12-31 11
+230 val_230 2008-12-31 11
+230 val_230 2008-12-31 12
+230 val_230 2008-12-31 12
+230 val_230 2008-12-31 12
+230 val_230 2008-12-31 12
+230 val_230 2008-12-31 12
+233 val_233 2008-12-31 11
+233 val_233 2008-12-31 11
+233 val_233 2008-12-31 12
+233 val_233 2008-12-31 12
+235 val_235 2008-12-31 11
+235 val_235 2008-12-31 12
+237 val_237 2008-12-31 11
+237 val_237 2008-12-31 11
+237 val_237 2008-12-31 12
+237 val_237 2008-12-31 12
+238 val_238 2008-12-31 11
+238 val_238 2008-12-31 11
+238 val_238 2008-12-31 12
+238 val_238 2008-12-31 12
+239 val_239 2008-12-31 11
+239 val_239 2008-12-31 11
+239 val_239 2008-12-31 12
+239 val_239 2008-12-31 12
+24 val_24 2008-12-31 11
+24 val_24 2008-12-31 11
+24 val_24 2008-12-31 12
+24 val_24 2008-12-31 12
+241 val_241 2008-12-31 11
+241 val_241 2008-12-31 12
+242 val_242 2008-12-31 11
+242 val_242 2008-12-31 11
+242 val_242 2008-12-31 12
+242 val_242 2008-12-31 12
+244 val_244 2008-12-31 11
+244 val_244 2008-12-31 12
+247 val_247 2008-12-31 11
+247 val_247 2008-12-31 12
+248 val_248 2008-12-31 11
+248 val_248 2008-12-31 12
+249 val_249 2008-12-31 11
+249 val_249 2008-12-31 12
+252 val_252 2008-12-31 11
+252 val_252 2008-12-31 12
+255 val_255 2008-12-31 11
+255 val_255 2008-12-31 11
+255 val_255 2008-12-31 12
+255 val_255 2008-12-31 12
+256 val_256 2008-12-31 11
+256 val_256 2008-12-31 11
+256 val_256 2008-12-31 12
+256 val_256 2008-12-31 12
+257 val_257 2008-12-31 11
+257 val_257 2008-12-31 12
+258 val_258 2008-12-31 11
+258 val_258 2008-12-31 12
+26 val_26 2008-12-31 11
+26 val_26 2008-12-31 11
+26 val_26 2008-12-31 12
+26 val_26 2008-12-31 12
+260 val_260 2008-12-31 11
+260 val_260 2008-12-31 12
+262 val_262 2008-12-31 11
+262 val_262 2008-12-31 12
+263 val_263 2008-12-31 11
+263 val_263 2008-12-31 12
+265 val_265 2008-12-31 11
+265 val_265 2008-12-31 11
+265 val_265 2008-12-31 12
+265 val_265 2008-12-31 12
+266 val_266 2008-12-31 11
+266 val_266 2008-12-31 12
+27 val_27 2008-12-31 11
+27 val_27 2008-12-31 12
+272 val_272 2008-12-31 11
+272 val_272 2008-12-31 11
+272 val_272 2008-12-31 12
+272 val_272 2008-12-31 12
+273 val_273 2008-12-31 11
+273 val_273 2008-12-31 11
+273 val_273 2008-12-31 11
+273 val_273 2008-12-31 12
+273 val_273 2008-12-31 12
+273 val_273 2008-12-31 12
+274 val_274 2008-12-31 11
+274 val_274 2008-12-31 12
+275 val_275 2008-12-31 11
+275 val_275 2008-12-31 12
+277 val_277 2008-12-31 11
+277 val_277 2008-12-31 11
+277 val_277 2008-12-31 11
+277 val_277 2008-12-31 11
+277 val_277 2008-12-31 12
+277 val_277 2008-12-31 12
+277 val_277 2008-12-31 12
+277 val_277 2008-12-31 12
+278 val_278 2008-12-31 11
+278 val_278 2008-12-31 11
+278 val_278 2008-12-31 12
+278 val_278 2008-12-31 12
+28 val_28 2008-12-31 11
+28 val_28 2008-12-31 12
+280 val_280 2008-12-31 11
+280 val_280 2008-12-31 11
+280 val_280 2008-12-31 12
+280 val_280 2008-12-31 12
+281 val_281 2008-12-31 11
+281 val_281 2008-12-31 11
+281 val_281 2008-12-31 12
+281 val_281 2008-12-31 12
+282 val_282 2008-12-31 11
+282 val_282 2008-12-31 11
+282 val_282 2008-12-31 12
+282 val_282 2008-12-31 12
+283 val_283 2008-12-31 11
+283 val_283 2008-12-31 12
+284 val_284 2008-12-31 11
+284 val_284 2008-12-31 12
+285 val_285 2008-12-31 11
+285 val_285 2008-12-31 12
+286 val_286 2008-12-31 11
+286 val_286 2008-12-31 12
+287 val_287 2008-12-31 11
+287 val_287 2008-12-31 12
+288 val_288 2008-12-31 11
+288 val_288 2008-12-31 11
+288 val_288 2008-12-31 12
+288 val_288 2008-12-31 12
+289 val_289 2008-12-31 11
+289 val_289 2008-12-31 12
+291 val_291 2008-12-31 11
+291 val_291 2008-12-31 12
+292 val_292 2008-12-31 11
+292 val_292 2008-12-31 12
+296 val_296 2008-12-31 11
+296 val_296 2008-12-31 12
+298 val_298 2008-12-31 11
+298 val_298 2008-12-31 11
+298 val_298 2008-12-31 11
+298 val_298 2008-12-31 12
+298 val_298 2008-12-31 12
+298 val_298 2008-12-31 12
+30 val_30 2008-12-31 11
+30 val_30 2008-12-31 12
+302 val_302 2008-12-31 11
+302 val_302 2008-12-31 12
+305 val_305 2008-12-31 11
+305 val_305 2008-12-31 12
+306 val_306 2008-12-31 11
+306 val_306 2008-12-31 12
+307 val_307 2008-12-31 11
+307 val_307 2008-12-31 11
+307 val_307 2008-12-31 12
+307 val_307 2008-12-31 12
+308 val_308 2008-12-31 11
+308 val_308 2008-12-31 12
+309 val_309 2008-12-31 11
+309 val_309 2008-12-31 11
+309 val_309 2008-12-31 12
+309 val_309 2008-12-31 12
+310 val_310 2008-12-31 11
+310 val_310 2008-12-31 12
+311 val_311 2008-12-31 11
+311 val_311 2008-12-31 11
+311 val_311 2008-12-31 11
+311 val_311 2008-12-31 12
+311 val_311 2008-12-31 12
+311 val_311 2008-12-31 12
+315 val_315 2008-12-31 11
+315 val_315 2008-12-31 12
+316 val_316 2008-12-31 11
+316 val_316 2008-12-31 11
+316 val_316 2008-12-31 11
+316 val_316 2008-12-31 12
+316 val_316 2008-12-31 12
+316 val_316 2008-12-31 12
+317 val_317 2008-12-31 11
+317 val_317 2008-12-31 11
+317 val_317 2008-12-31 12
+317 val_317 2008-12-31 12
+318 val_318 2008-12-31 11
+318 val_318 2008-12-31 11
+318 val_318 2008-12-31 11
+318 val_318 2008-12-31 12
+318 val_318 2008-12-31 12
+318 val_318 2008-12-31 12
+321 val_321 2008-12-31 11
+321 val_321 2008-12-31 11
+321 val_321 2008-12-31 12
+321 val_321 2008-12-31 12
+322 val_322 2008-12-31 11
+322 val_322 2008-12-31 11
+322 val_322 2008-12-31 12
+322 val_322 2008-12-31 12
+323 val_323 2008-12-31 11
+323 val_323 2008-12-31 12
+325 val_325 2008-12-31 11
+325 val_325 2008-12-31 11
+325 val_325 2008-12-31 12
+325 val_325 2008-12-31 12
+327 val_327 2008-12-31 11
+327 val_327 2008-12-31 11
+327 val_327 2008-12-31 11
+327 val_327 2008-12-31 12
+327 val_327 2008-12-31 12
+327 val_327 2008-12-31 12
+33 val_33 2008-12-31 11
+33 val_33 2008-12-31 12
+331 val_331 2008-12-31 11
+331 val_331 2008-12-31 11
+331 val_331 2008-12-31 12
+331 val_331 2008-12-31 12
+332 val_332 2008-12-31 11
+332 val_332 2008-12-31 12
+333 val_333 2008-12-31 11
+333 val_333 2008-12-31 11
+333 val_333 2008-12-31 12
+333 val_333 2008-12-31 12
+335 val_335 2008-12-31 11
+335 val_335 2008-12-31 12
+336 val_336 2008-12-31 11
+336 val_336 2008-12-31 12
+338 val_338 2008-12-31 11
+338 val_338 2008-12-31 12
+339 val_339 2008-12-31 11
+339 val_339 2008-12-31 12
+34 val_34 2008-12-31 11
+34 val_34 2008-12-31 12
+341 val_341 2008-12-31 11
+341 val_341 2008-12-31 12
+342 val_342 2008-12-31 11
+342 val_342 2008-12-31 11
+342 val_342 2008-12-31 12
+342 val_342 2008-12-31 12
+344 val_344 2008-12-31 11
+344 val_344 2008-12-31 11
+344 val_344 2008-12-31 12
+344 val_344 2008-12-31 12
+345 val_345 2008-12-31 11
+345 val_345 2008-12-31 12
+348 val_348 2008-12-31 11
+348 val_348 2008-12-31 11
+348 val_348 2008-12-31 11
+348 val_348 2008-12-31 11
+348 val_348 2008-12-31 11
+348 val_348 2008-12-31 12
+348 val_348 2008-12-31 12
+348 val_348 2008-12-31 12
+348 val_348 2008-12-31 12
+348 val_348 2008-12-31 12
+35 val_35 2008-12-31 11
+35 val_35 2008-12-31 11
+35 val_35 2008-12-31 11
+35 val_35 2008-12-31 12
+35 val_35 2008-12-31 12
+35 val_35 2008-12-31 12
+351 val_351 2008-12-31 11
+351 val_351 2008-12-31 12
+353 val_353 2008-12-31 11
+353 val_353 2008-12-31 11
+353 val_353 2008-12-31 12
+353 val_353 2008-12-31 12
+356 val_356 2008-12-31 11
+356 val_356 2008-12-31 12
+360 val_360 2008-12-31 11
+360 val_360 2008-12-31 12
+362 val_362 2008-12-31 11
+362 val_362 2008-12-31 12
+364 val_364 2008-12-31 11
+364 val_364 2008-12-31 12
+365 val_365 2008-12-31 11
+365 val_365 2008-12-31 12
+366 val_366 2008-12-31 11
+366 val_366 2008-12-31 12
+367 val_367 2008-12-31 11
+367 val_367 2008-12-31 11
+367 val_367 2008-12-31 12
+367 val_367 2008-12-31 12
+368 val_368 2008-12-31 11
+368 val_368 2008-12-31 12
+369 val_369 2008-12-31 11
+369 val_369 2008-12-31 11
+369 val_369 2008-12-31 11
+369 val_369 2008-12-31 12
+369 val_369 2008-12-31 12
+369 val_369 2008-12-31 12
+37 val_37 2008-12-31 11
+37 val_37 2008-12-31 11
+37 val_37 2008-12-31 12
+37 val_37 2008-12-31 12
+373 val_373 2008-12-31 11
+373 val_373 2008-12-31 12
+374 val_374 2008-12-31 11
+374 val_374 2008-12-31 12
+375 val_375 2008-12-31 11
+375 val_375 2008-12-31 12
+377 val_377 2008-12-31 11
+377 val_377 2008-12-31 12
+378 val_378 2008-12-31 11
+378 val_378 2008-12-31 12
+379 val_379 2008-12-31 11
+379 val_379 2008-12-31 12
+382 val_382 2008-12-31 11
+382 val_382 2008-12-31 11
+382 val_382 2008-12-31 12
+382 val_382 2008-12-31 12
+384 val_384 2008-12-31 11
+384 val_384 2008-12-31 11
+384 val_384 2008-12-31 11
+384 val_384 2008-12-31 12
+384 val_384 2008-12-31 12
+384 val_384 2008-12-31 12
+386 val_386 2008-12-31 11
+386 val_386 2008-12-31 12
+389 val_389 2008-12-31 11
+389 val_389 2008-12-31 12
+392 val_392 2008-12-31 11
+392 val_392 2008-12-31 12
+393 val_393 2008-12-31 11
+393 val_393 2008-12-31 12
+394 val_394 2008-12-31 11
+394 val_394 2008-12-31 12
+395 val_395 2008-12-31 11
+395 val_395 2008-12-31 11
+395 val_395 2008-12-31 12
+395 val_395 2008-12-31 12
+396 val_396 2008-12-31 11
+396 val_396 2008-12-31 11
+396 val_396 2008-12-31 11
+396 val_396 2008-12-31 12
+396 val_396 2008-12-31 12
+396 val_396 2008-12-31 12
+397 val_397 2008-12-31 11
+397 val_397 2008-12-31 11
+397 val_397 2008-12-31 12
+397 val_397 2008-12-31 12
+399 val_399 2008-12-31 11
+399 val_399 2008-12-31 11
+399 val_399 2008-12-31 12
+399 val_399 2008-12-31 12
+4 val_4 2008-12-31 11
+4 val_4 2008-12-31 12
+400 val_400 2008-12-31 11
+400 val_400 2008-12-31 12
+401 val_401 2008-12-31 11
+401 val_401 2008-12-31 11
+401 val_401 2008-12-31 11
+401 val_401 2008-12-31 11
+401 val_401 2008-12-31 11
+401 val_401 2008-12-31 12
+401 val_401 2008-12-31 12
+401 val_401 2008-12-31 12
+401 val_401 2008-12-31 12
+401 val_401 2008-12-31 12
+402 val_402 2008-12-31 11
+402 val_402 2008-12-31 12
+403 val_403 2008-12-31 11
+403 val_403 2008-12-31 11
+403 val_403 2008-12-31 11
+403 val_403 2008-12-31 12
+403 val_403 2008-12-31 12
+403 val_403 2008-12-31 12
+404 val_404 2008-12-31 11
+404 val_404 2008-12-31 11
+404 val_404 2008-12-31 12
+404 val_404 2008-12-31 12
+406 val_406 2008-12-31 11
+406 val_406 2008-12-31 11
+406 val_406 2008-12-31 11
+406 val_406 2008-12-31 11
+406 val_406 2008-12-31 12
+406 val_406 2008-12-31 12
+406 val_406 2008-12-31 12
+406 val_406 2008-12-31 12
+407 val_407 2008-12-31 11
+407 val_407 2008-12-31 12
+409 val_409 2008-12-31 11
+409 val_409 2008-12-31 11
+409 val_409 2008-12-31 11
+409 val_409 2008-12-31 12
+409 val_409 2008-12-31 12
+409 val_409 2008-12-31 12
+41 val_41 2008-12-31 11
+41 val_41 2008-12-31 12
+411 val_411 2008-12-31 11
+411 val_411 2008-12-31 12
+413 val_413 2008-12-31 11
+413 val_413 2008-12-31 11
+413 val_413 2008-12-31 12
+413 val_413 2008-12-31 12
+414 val_414 2008-12-31 11
+414 val_414 2008-12-31 11
+414 val_414 2008-12-31 12
+414 val_414 2008-12-31 12
+417 val_417 2008-12-31 11
+417 val_417 2008-12-31 11
+417 val_417 2008-12-31 11
+417 val_417 2008-12-31 12
+417 val_417 2008-12-31 12
+417 val_417 2008-12-31 12
+418 val_418 2008-12-31 11
+418 val_418 2008-12-31 12
+419 val_419 2008-12-31 11
+419 val_419 2008-12-31 12
+42 val_42 2008-12-31 11
+42 val_42 2008-12-31 11
+42 val_42 2008-12-31 12
+42 val_42 2008-12-31 12
+421 val_421 2008-12-31 11
+421 val_421 2008-12-31 12
+424 val_424 2008-12-31 11
+424 val_424 2008-12-31 11
+424 val_424 2008-12-31 12
+424 val_424 2008-12-31 12
+427 val_427 2008-12-31 11
+427 val_427 2008-12-31 12
+429 val_429 2008-12-31 11
+429 val_429 2008-12-31 11
+429 val_429 2008-12-31 12
+429 val_429 2008-12-31 12
+43 val_43 2008-12-31 11
+43 val_43 2008-12-31 12
+430 val_430 2008-12-31 11
+430 val_430 2008-12-31 11
+430 val_430 2008-12-31 11
+430 val_430 2008-12-31 12
+430 val_430 2008-12-31 12
+430 val_430 2008-12-31 12
+431 val_431 2008-12-31 11
+431 val_431 2008-12-31 11
+431 val_431 2008-12-31 11
+431 val_431 2008-12-31 12
+431 val_431 2008-12-31 12
+431 val_431 2008-12-31 12
+432 val_432 2008-12-31 11
+432 val_432 2008-12-31 12
+435 val_435 2008-12-31 11
+435 val_435 2008-12-31 12
+436 val_436 2008-12-31 11
+436 val_436 2008-12-31 12
+437 val_437 2008-12-31 11
+437 val_437 2008-12-31 12
+438 val_438 2008-12-31 11
+438 val_438 2008-12-31 11
+438 val_438 2008-12-31 11
+438 val_438 2008-12-31 12
+438 val_438 2008-12-31 12
+438 val_438 2008-12-31 12
+439 val_439 2008-12-31 11
+439 val_439 2008-12-31 11
+439 val_439 2008-12-31 12
+439 val_439 2008-12-31 12
+44 val_44 2008-12-31 11
+44 val_44 2008-12-31 12
+443 val_443 2008-12-31 11
+443 val_443 2008-12-31 12
+444 val_444 2008-12-31 11
+444 val_444 2008-12-31 12
+446 val_446 2008-12-31 11
+446 val_446 2008-12-31 12
+448 val_448 2008-12-31 11
+448 val_448 2008-12-31 12
+449 val_449 2008-12-31 11
+449 val_449 2008-12-31 12
+452 val_452 2008-12-31 11
+452 val_452 2008-12-31 12
+453 val_453 2008-12-31 11
+453 val_453 2008-12-31 12
+454 val_454 2008-12-31 11
+454 val_454 2008-12-31 11
+454 val_454 2008-12-31 11
+454 val_454 2008-12-31 12
+454 val_454 2008-12-31 12
+454 val_454 2008-12-31 12
+455 val_455 2008-12-31 11
+455 val_455 2008-12-31 12
+457 val_457 2008-12-31 11
+457 val_457 2008-12-31 12
+458 val_458 2008-12-31 11
+458 val_458 2008-12-31 11
+458 val_458 2008-12-31 12
+458 val_458 2008-12-31 12
+459 val_459 2008-12-31 11
+459 val_459 2008-12-31 11
+459 val_459 2008-12-31 12
+459 val_459 2008-12-31 12
+460 val_460 2008-12-31 11
+460 val_460 2008-12-31 12
+462 val_462 2008-12-31 11
+462 val_462 2008-12-31 11
+462 val_462 2008-12-31 12
+462 val_462 2008-12-31 12
+463 val_463 2008-12-31 11
+463 val_463 2008-12-31 11
+463 val_463 2008-12-31 12
+463 val_463 2008-12-31 12
+466 val_466 2008-12-31 11
+466 val_466 2008-12-31 11
+466 val_466 2008-12-31 11
+466 val_466 2008-12-31 12
+466 val_466 2008-12-31 12
+466 val_466 2008-12-31 12
+467 val_467 2008-12-31 11
+467 val_467 2008-12-31 12
+468 val_468 2008-12-31 11
+468 val_468 2008-12-31 11
+468 val_468 2008-12-31 11
+468 val_468 2008-12-31 11
+468 val_468 2008-12-31 12
+468 val_468 2008-12-31 12
+468 val_468 2008-12-31 12
+468 val_468 2008-12-31 12
+469 val_469 2008-12-31 11
+469 val_469 2008-12-31 11
+469 val_469 2008-12-31 11
+469 val_469 2008-12-31 11
+469 val_469 2008-12-31 11
+469 val_469 2008-12-31 12
+469 val_469 2008-12-31 12
+469 val_469 2008-12-31 12
+469 val_469 2008-12-31 12
+469 val_469 2008-12-31 12
+47 val_47 2008-12-31 11
+47 val_47 2008-12-31 12
+470 val_470 2008-12-31 11
+470 val_470 2008-12-31 12
+472 val_472 2008-12-31 11
+472 val_472 2008-12-31 12
+475 val_475 2008-12-31 11
+475 val_475 2008-12-31 12
+477 val_477 2008-12-31 11
+477 val_477 2008-12-31 12
+478 val_478 2008-12-31 11
+478 val_478 2008-12-31 11
+478 val_478 2008-12-31 12
+478 val_478 2008-12-31 12
+479 val_479 2008-12-31 11
+479 val_479 2008-12-31 12
+480 val_480 2008-12-31 11
+480 val_480 2008-12-31 11
+480 val_480 2008-12-31 11
+480 val_480 2008-12-31 12
+480 val_480 2008-12-31 12
+480 val_480 2008-12-31 12
+481 val_481 2008-12-31 11
+481 val_481 2008-12-31 12
+482 val_482 2008-12-31 11
+482 val_482 2008-12-31 12
+483 val_483 2008-12-31 11
+483 val_483 2008-12-31 12
+484 val_484 2008-12-31 11
+484 val_484 2008-12-31 12
+485 val_485 2008-12-31 11
+485 val_485 2008-12-31 12
+487 val_487 2008-12-31 11
+487 val_487 2008-12-31 12
+489 val_489 2008-12-31 11
+489 val_489 2008-12-31 11
+489 val_489 2008-12-31 11
+489 val_489 2008-12-31 11
+489 val_489 2008-12-31 12
+489 val_489 2008-12-31 12
+489 val_489 2008-12-31 12
+489 val_489 2008-12-31 12
+490 val_490 2008-12-31 11
+490 val_490 2008-12-31 12
+491 val_491 2008-12-31 11
+491 val_491 2008-12-31 12
+492 val_492 2008-12-31 11
+492 val_492 2008-12-31 11
+492 val_492 2008-12-31 12
+492 val_492 2008-12-31 12
+493 val_493 2008-12-31 11
+493 val_493 2008-12-31 12
+494 val_494 2008-12-31 11
+494 val_494 2008-12-31 12
+495 val_495 2008-12-31 11
+495 val_495 2008-12-31 12
+496 val_496 2008-12-31 11
+496 val_496 2008-12-31 12
+497 val_497 2008-12-31 11
+497 val_497 2008-12-31 12
+498 val_498 2008-12-31 11
+498 val_498 2008-12-31 11
+498 val_498 2008-12-31 11
+498 val_498 2008-12-31 12
+498 val_498 2008-12-31 12
+498 val_498 2008-12-31 12
+5 val_5 2008-12-31 11
+5 val_5 2008-12-31 11
+5 val_5 2008-12-31 11
+5 val_5 2008-12-31 12
+5 val_5 2008-12-31 12
+5 val_5 2008-12-31 12
+51 val_51 2008-12-31 11
+51 val_51 2008-12-31 11
+51 val_51 2008-12-31 12
+51 val_51 2008-12-31 12
+53 val_53 2008-12-31 11
+53 val_53 2008-12-31 12
+54 val_54 2008-12-31 11
+54 val_54 2008-12-31 12
+57 val_57 2008-12-31 11
+57 val_57 2008-12-31 12
+58 val_58 2008-12-31 11
+58 val_58 2008-12-31 11
+58 val_58 2008-12-31 12
+58 val_58 2008-12-31 12
+64 val_64 2008-12-31 11
+64 val_64 2008-12-31 12
+65 val_65 2008-12-31 11
+65 val_65 2008-12-31 12
+66 val_66 2008-12-31 11
+66 val_66 2008-12-31 12
+67 val_67 2008-12-31 11
+67 val_67 2008-12-31 11
+67 val_67 2008-12-31 12
+67 val_67 2008-12-31 12
+69 val_69 2008-12-31 11
+69 val_69 2008-12-31 12
+70 val_70 2008-12-31 11
+70 val_70 2008-12-31 11
+70 val_70 2008-12-31 11
+70 val_70 2008-12-31 12
+70 val_70 2008-12-31 12
+70 val_70 2008-12-31 12
+72 val_72 2008-12-31 11
+72 val_72 2008-12-31 11
+72 val_72 2008-12-31 12
+72 val_72 2008-12-31 12
+74 val_74 2008-12-31 11
+74 val_74 2008-12-31 12
+76 val_76 2008-12-31 11
+76 val_76 2008-12-31 11
+76 val_76 2008-12-31 12
+76 val_76 2008-12-31 12
+77 val_77 2008-12-31 11
+77 val_77 2008-12-31 12
+78 val_78 2008-12-31 11
+78 val_78 2008-12-31 12
+8 val_8 2008-12-31 11
+8 val_8 2008-12-31 12
+80 val_80 2008-12-31 11
+80 val_80 2008-12-31 12
+82 val_82 2008-12-31 11
+82 val_82 2008-12-31 12
+83 val_83 2008-12-31 11
+83 val_83 2008-12-31 11
+83 val_83 2008-12-31 12
+83 val_83 2008-12-31 12
+84 val_84 2008-12-31 11
+84 val_84 2008-12-31 11
+84 val_84 2008-12-31 12
+84 val_84 2008-12-31 12
+85 val_85 2008-12-31 11
+85 val_85 2008-12-31 12
+86 val_86 2008-12-31 11
+86 val_86 2008-12-31 12
+87 val_87 2008-12-31 11
+87 val_87 2008-12-31 12
+9 val_9 2008-12-31 11
+9 val_9 2008-12-31 12
+90 val_90 2008-12-31 11
+90 val_90 2008-12-31 11
+90 val_90 2008-12-31 11
+90 val_90 2008-12-31 12
+90 val_90 2008-12-31 12
+90 val_90 2008-12-31 12
+92 val_92 2008-12-31 11
+92 val_92 2008-12-31 12
+95 val_95 2008-12-31 11
+95 val_95 2008-12-31 11
+95 val_95 2008-12-31 12
+95 val_95 2008-12-31 12
+96 val_96 2008-12-31 11
+96 val_96 2008-12-31 12
+97 val_97 2008-12-31 11
+97 val_97 2008-12-31 11
+97 val_97 2008-12-31 12
+97 val_97 2008-12-31 12
+98 val_98 2008-12-31 11
+98 val_98 2008-12-31 11
+98 val_98 2008-12-31 12
+98 val_98 2008-12-31 12
[08/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge1.q.out b/ql/src/test/results/clientpositive/llap/orc_merge1.q.out
new file mode 100644
index 0000000..241fb05
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge1.q.out
@@ -0,0 +1,500 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE orcfile_merge1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE orcfile_merge1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge1b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge1b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge1c
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge1c
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1
+POSTHOOK: query: CREATE TABLE orcfile_merge1 (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1
+PREHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1b
+POSTHOOK: query: CREATE TABLE orcfile_merge1b (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1b
+PREHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge1c
+POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING)
+ PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge1c
+PREHOOK: query: -- merge disabled
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- merge disabled
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 1
+ part
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 6 items
+#### A masked pattern was here ####
+PREHOOK: query: -- auto-merge slow way
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- auto-merge slow way
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
+ Stage-5
+ Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4
+ Stage-6
+ Stage-7 depends on stages: Stage-6
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1b
+ Execution mode: llap
+
+ Stage: Stage-8
+ Conditional Operator
+
+ Stage: Stage-5
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 1
+ part
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1b
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-4
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ File Merge
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1b
+
+ Stage: Stage-6
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ File Merge
+ Map Operator Tree:
+ TableScan
+ File Output Operator
+ compressed: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1b
+
+ Stage: Stage-7
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1b@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1b@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- auto-merge fast way
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- auto-merge fast way
+EXPLAIN
+ INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
+ Stage-5
+ Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4
+ Stage-6
+ Stage-7 depends on stages: Stage-6
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 2) (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1c
+ Execution mode: llap
+
+ Stage: Stage-8
+ Conditional Operator
+
+ Stage: Stage-5
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 1
+ part
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge1c
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-4
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ File Merge
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ File Merge
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-7
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge1c@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+ SELECT key, value, PMOD(HASH(key), 2) as part
+ FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Output: default@orcfile_merge1c@ds=1/part=1
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1 WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Verify
+SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1 WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1b WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1b WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1b
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1c WHERE ds='1'
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge1c WHERE ds='1'
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1c
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+-21975308766
+PREHOOK: query: select count(*) from orcfile_merge1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from orcfile_merge1b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1b
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from orcfile_merge1c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orcfile_merge1c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge1c
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=0
+POSTHOOK: Input: default@orcfile_merge1c@ds=1/part=1
+#### A masked pattern was here ####
+500
+PREHOOK: query: DROP TABLE orcfile_merge1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1
+PREHOOK: Output: default@orcfile_merge1
+POSTHOOK: query: DROP TABLE orcfile_merge1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge1
+POSTHOOK: Output: default@orcfile_merge1
+PREHOOK: query: DROP TABLE orcfile_merge1b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1b
+PREHOOK: Output: default@orcfile_merge1b
+POSTHOOK: query: DROP TABLE orcfile_merge1b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge1b
+POSTHOOK: Output: default@orcfile_merge1b
+PREHOOK: query: DROP TABLE orcfile_merge1c
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge1c
+PREHOOK: Output: default@orcfile_merge1c
+POSTHOOK: query: DROP TABLE orcfile_merge1c
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge1c
+POSTHOOK: Output: default@orcfile_merge1c
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge2.q.out b/ql/src/test/results/clientpositive/llap/orc_merge2.q.out
new file mode 100644
index 0000000..a399be9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge2.q.out
@@ -0,0 +1,231 @@
+PREHOOK: query: DROP TABLE orcfile_merge2a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge2a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge2a (key INT, value STRING)
+ PARTITIONED BY (one string, two string, three string)
+ STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge2a
+POSTHOOK: query: CREATE TABLE orcfile_merge2a (key INT, value STRING)
+ PARTITIONED BY (one string, two string, three string)
+ STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge2a
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge2a
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ one 1
+ three
+ two
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge2a
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge2a@one=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+ SELECT key, value, PMOD(HASH(key), 10) as two,
+ PMOD(HASH(value), 10) as three
+ FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=0/three=2
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=0/three=8
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=1/three=3
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=1/three=9
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=2/three=0
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=2/three=4
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=3/three=1
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=3/three=5
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=4/three=2
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=4/three=6
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=5/three=3
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=5/three=7
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=6/three=4
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=6/three=8
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=7/three=5
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=7/three=9
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=8/three=0
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=8/three=6
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=9/three=1
+POSTHOOK: Output: default@orcfile_merge2a@one=1/two=9/three=7
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=0,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=1,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=2,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=3,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=4,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=5,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=4).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=4).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=8).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=6,three=8).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=5).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=5).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=9).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=7,three=9).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=6).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=8,three=6).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge2a PARTITION(one=1,two=9,three=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge2a
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge2a
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=2
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=8
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=3
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=9
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=0
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=4
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=1
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=5
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=2
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=6
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=3
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=7
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=4
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=8
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=5
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=9
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=0
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=6
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=1
+PREHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=7
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
+ FROM orcfile_merge2a
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge2a
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=2
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=0/three=8
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=3
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=1/three=9
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=0
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=2/three=4
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=1
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=3/three=5
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=2
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=4/three=6
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=3
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=5/three=7
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=4
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=6/three=8
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=5
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=7/three=9
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=0
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=8/three=6
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=1
+POSTHOOK: Input: default@orcfile_merge2a@one=1/two=9/three=7
+#### A masked pattern was here ####
+-4209012844
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value, '1', PMOD(HASH(key), 10),
+ PMOD(HASH(value), 10)) USING 'tr \t _' AS (c)
+ FROM src
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value, '1', PMOD(HASH(key), 10),
+ PMOD(HASH(value), 10)) USING 'tr \t _' AS (c)
+ FROM src
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+-4209012844
+PREHOOK: query: DROP TABLE orcfile_merge2a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge2a
+PREHOOK: Output: default@orcfile_merge2a
+POSTHOOK: query: DROP TABLE orcfile_merge2a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge2a
+POSTHOOK: Output: default@orcfile_merge2a
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge3.q.out b/ql/src/test/results/clientpositive/llap/orc_merge3.q.out
new file mode 100644
index 0000000..893b6d2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge3.q.out
@@ -0,0 +1,170 @@
+PREHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge3a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge3b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge3b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge3a (key int, value string)
+ PARTITIONED BY (ds string) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge3a
+POSTHOOK: query: CREATE TABLE orcfile_merge3a (key int, value string)
+ PARTITIONED BY (ds string) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge3a
+PREHOOK: query: CREATE TABLE orcfile_merge3b (key int, value string) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: CREATE TABLE orcfile_merge3b (key int, value string) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge3b
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+ SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge3a@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+ SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge3a@ds=2
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orcfile_merge3a
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge3b
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orcfile_merge3b
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Input: default@orcfile_merge3a@ds=1
+PREHOOK: Input: default@orcfile_merge3a@ds=2
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Input: default@orcfile_merge3a@ds=1
+POSTHOOK: Input: default@orcfile_merge3a@ds=2
+POSTHOOK: Output: default@orcfile_merge3b
+POSTHOOK: Lineage: orcfile_merge3b.key SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge3b.value SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:value, type:string, comment:null), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3a
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Input: default@orcfile_merge3a@ds=1
+PREHOOK: Input: default@orcfile_merge3a@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3a
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Input: default@orcfile_merge3a@ds=1
+POSTHOOK: Input: default@orcfile_merge3a@ds=2
+#### A masked pattern was here ####
+14412220296
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3b
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3b
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3b
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3b
+#### A masked pattern was here ####
+14412220296
+PREHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Output: default@orcfile_merge3a
+POSTHOOK: query: DROP TABLE orcfile_merge3a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Output: default@orcfile_merge3a
+PREHOOK: query: DROP TABLE orcfile_merge3b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge3b
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: DROP TABLE orcfile_merge3b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge3b
+POSTHOOK: Output: default@orcfile_merge3b
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge4.q.out b/ql/src/test/results/clientpositive/llap/orc_merge4.q.out
new file mode 100644
index 0000000..1018bcf
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge4.q.out
@@ -0,0 +1,186 @@
+PREHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge3a
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orcfile_merge3b
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orcfile_merge3b
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orcfile_merge3a (key int, value string)
+ PARTITIONED BY (ds string) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge3a
+POSTHOOK: query: CREATE TABLE orcfile_merge3a (key int, value string)
+ PARTITIONED BY (ds string) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge3a
+PREHOOK: query: CREATE TABLE orcfile_merge3b (key int, value string) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: CREATE TABLE orcfile_merge3b (key int, value string) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcfile_merge3b
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+ SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge3a@ds=1
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+ SELECT * FROM src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcfile_merge3a@ds=2
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+ SELECT * FROM src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@orcfile_merge3a@ds=2
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcfile_merge3a PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+Found 1 items
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orcfile_merge3a
+ Statistics: Num rows: 1000 Data size: 94000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1000 Data size: 94000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1000 Data size: 94000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.orcfile_merge3b
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.orcfile_merge3b
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Input: default@orcfile_merge3a@ds=1
+PREHOOK: Input: default@orcfile_merge3a@ds=2
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge3b
+ SELECT key, value FROM orcfile_merge3a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Input: default@orcfile_merge3a@ds=1
+POSTHOOK: Input: default@orcfile_merge3a@ds=2
+POSTHOOK: Output: default@orcfile_merge3b
+POSTHOOK: Lineage: orcfile_merge3b.key SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: orcfile_merge3b.value SIMPLE [(orcfile_merge3a)orcfile_merge3a.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3a
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Input: default@orcfile_merge3a@ds=1
+PREHOOK: Input: default@orcfile_merge3a@ds=2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3a
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Input: default@orcfile_merge3a@ds=1
+POSTHOOK: Input: default@orcfile_merge3a@ds=2
+#### A masked pattern was here ####
+14412220296
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3b
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orcfile_merge3b
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
+ FROM orcfile_merge3b
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orcfile_merge3b
+#### A masked pattern was here ####
+14412220296
+PREHOOK: query: DROP TABLE orcfile_merge3a
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge3a
+PREHOOK: Output: default@orcfile_merge3a
+POSTHOOK: query: DROP TABLE orcfile_merge3a
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge3a
+POSTHOOK: Output: default@orcfile_merge3a
+PREHOOK: query: DROP TABLE orcfile_merge3b
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orcfile_merge3b
+PREHOOK: Output: default@orcfile_merge3b
+POSTHOOK: query: DROP TABLE orcfile_merge3b
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orcfile_merge3b
+POSTHOOK: Output: default@orcfile_merge3b
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge5.q.out b/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
new file mode 100644
index 0000000..0837007
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge5.q.out
@@ -0,0 +1,344 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5b
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ filterExpr: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 3 files total
+analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: -- 3 files total
+analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
+ Stage-5
+ Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4
+ Stage-6
+ Stage-7 depends on stages: Stage-6
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ filterExpr: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+ Execution mode: llap
+
+ Stage: Stage-8
+ Conditional Operator
+
+ Stage: Stage-5
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-4
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ File Merge
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ File Merge
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-7
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
+PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5b
+POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
+PREHOOK: query: explain alter table orc_merge5b concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: query: explain alter table orc_merge5b concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+ Stage-1 depends on stages: Stage-0
+ Stage-2 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-0
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5b
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: alter table orc_merge5b concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: alter table orc_merge5b concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5b compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+PREHOOK: Output: default@orc_merge5b
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5b compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+POSTHOOK: Output: default@orc_merge5b
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: select * from orc_merge5b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5b
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05
+2 foo 0.8 1 1969-12-31 16:00:00
+5 eat 0.8 6 1969-12-31 16:00:20
[19/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/join_nullsafe.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join_nullsafe.q.out b/ql/src/test/results/clientpositive/llap/join_nullsafe.q.out
new file mode 100644
index 0000000..5ec2db7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/join_nullsafe.q.out
@@ -0,0 +1,1667 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE myinput1(key int, value int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@myinput1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE myinput1(key int, value int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@myinput1
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@myinput1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@myinput1
+PREHOOK: query: -- merging
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- merging
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: int)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: int)
+ sort order: +
+ Map-reduce partition columns: value (type: int)
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: int)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 value (type: int)
+ nullSafes: [true]
+ outputColumnNames: _col0, _col1, _col5, _col6
+ Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- SORT_QUERY_RESULTS
+select * from myinput1 a join myinput1 b on a.key<=>b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+select * from myinput1 a join myinput1 b on a.key<=>b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+10 NULL NULL 10
+100 100 100 100
+NULL 10 10 NULL
+NULL 10 48 NULL
+NULL 10 NULL NULL
+NULL 35 10 NULL
+NULL 35 48 NULL
+NULL 35 NULL NULL
+NULL NULL 10 NULL
+NULL NULL 48 NULL
+NULL NULL NULL NULL
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: int)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: int)
+ sort order: +
+ Map-reduce partition columns: value (type: int)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: int)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: int)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: int)
+ 1 value (type: int)
+ 2 key (type: int)
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+10 NULL NULL 10 10 NULL
+100 100 100 100 100 100
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: int)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: int)
+ sort order: +
+ Map-reduce partition columns: value (type: int)
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: int)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: int)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: int)
+ 1 value (type: int)
+ 2 key (type: int)
+ nullSafes: [true]
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+10 NULL NULL 10 10 NULL
+100 100 100 100 100 100
+NULL 10 10 NULL NULL 10
+NULL 10 10 NULL NULL 35
+NULL 10 10 NULL NULL NULL
+NULL 10 48 NULL NULL 10
+NULL 10 48 NULL NULL 35
+NULL 10 48 NULL NULL NULL
+NULL 10 NULL NULL NULL 10
+NULL 10 NULL NULL NULL 35
+NULL 10 NULL NULL NULL NULL
+NULL 35 10 NULL NULL 10
+NULL 35 10 NULL NULL 35
+NULL 35 10 NULL NULL NULL
+NULL 35 48 NULL NULL 10
+NULL 35 48 NULL NULL 35
+NULL 35 48 NULL NULL NULL
+NULL 35 NULL NULL NULL 10
+NULL 35 NULL NULL NULL 35
+NULL 35 NULL NULL NULL NULL
+NULL NULL 10 NULL NULL 10
+NULL NULL 10 NULL NULL 35
+NULL NULL 10 NULL NULL NULL
+NULL NULL 48 NULL NULL 10
+NULL NULL 48 NULL NULL 35
+NULL NULL 48 NULL NULL NULL
+NULL NULL NULL NULL NULL 10
+NULL NULL NULL NULL NULL 35
+NULL NULL NULL NULL NULL NULL
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int), value (type: int)
+ sort order: ++
+ Map-reduce partition columns: key (type: int), value (type: int)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: int), key (type: int)
+ sort order: ++
+ Map-reduce partition columns: value (type: int), key (type: int)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int), value (type: int)
+ sort order: ++
+ Map-reduce partition columns: key (type: int), value (type: int)
+ Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: int), value (type: int)
+ 1 value (type: int), key (type: int)
+ 2 key (type: int), value (type: int)
+ nullSafes: [true, false]
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+100 100 100 100 100 100
+NULL 10 10 NULL NULL 10
+PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int), value (type: int)
+ sort order: ++
+ Map-reduce partition columns: key (type: int), value (type: int)
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: int), key (type: int)
+ sort order: ++
+ Map-reduce partition columns: value (type: int), key (type: int)
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int), value (type: int)
+ sort order: ++
+ Map-reduce partition columns: key (type: int), value (type: int)
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: int), value (type: int)
+ 1 value (type: int), key (type: int)
+ 2 key (type: int), value (type: int)
+ nullSafes: [true, true]
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+10 NULL NULL 10 10 NULL
+100 100 100 100 100 100
+NULL 10 10 NULL NULL 10
+NULL NULL NULL NULL NULL NULL
+PREHOOK: query: -- outer joins
+SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: -- outer joins
+SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+10 NULL NULL 10
+100 100 100 100
+48 NULL NULL NULL
+NULL 10 10 NULL
+NULL 10 48 NULL
+NULL 10 NULL NULL
+NULL 35 10 NULL
+NULL 35 48 NULL
+NULL 35 NULL NULL
+NULL NULL 10 NULL
+NULL NULL 48 NULL
+NULL NULL NULL NULL
+PREHOOK: query: SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+10 NULL NULL 10
+100 100 100 100
+NULL 10 10 NULL
+NULL 10 48 NULL
+NULL 10 NULL NULL
+NULL 35 10 NULL
+NULL 35 48 NULL
+NULL 35 NULL NULL
+NULL NULL 10 NULL
+NULL NULL 48 NULL
+NULL NULL NULL 35
+NULL NULL NULL NULL
+PREHOOK: query: SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+10 NULL NULL 10
+100 100 100 100
+48 NULL NULL NULL
+NULL 10 10 NULL
+NULL 10 48 NULL
+NULL 10 NULL NULL
+NULL 35 10 NULL
+NULL 35 48 NULL
+NULL 35 NULL NULL
+NULL NULL 10 NULL
+NULL NULL 48 NULL
+NULL NULL NULL 35
+NULL NULL NULL NULL
+PREHOOK: query: -- map joins
+SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: -- map joins
+SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+10 NULL NULL 10
+100 100 100 100
+NULL 10 10 NULL
+NULL 10 48 NULL
+NULL 10 NULL NULL
+NULL 35 10 NULL
+NULL 35 48 NULL
+NULL 35 NULL NULL
+NULL NULL 10 NULL
+NULL NULL 48 NULL
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+10 NULL NULL 10
+100 100 100 100
+NULL 10 10 NULL
+NULL 10 48 NULL
+NULL 10 NULL NULL
+NULL 35 10 NULL
+NULL 35 48 NULL
+NULL 35 NULL NULL
+NULL NULL 10 NULL
+NULL NULL 48 NULL
+NULL NULL NULL NULL
+PREHOOK: query: CREATE TABLE smb_input(key int, value int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_input
+POSTHOOK: query: CREATE TABLE smb_input(key int, value int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_input
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_input
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_input
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@smb_input
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@smb_input
+PREHOOK: query: -- smbs
+CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_input1
+POSTHOOK: query: -- smbs
+CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_input1
+PREHOOK: query: CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@smb_input2
+POSTHOOK: query: CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@smb_input2
+PREHOOK: query: from smb_input
+insert overwrite table smb_input1 select *
+insert overwrite table smb_input2 select *
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input
+PREHOOK: Output: default@smb_input1
+PREHOOK: Output: default@smb_input2
+POSTHOOK: query: from smb_input
+insert overwrite table smb_input1 select *
+insert overwrite table smb_input2 select *
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input
+POSTHOOK: Output: default@smb_input1
+POSTHOOK: Output: default@smb_input2
+POSTHOOK: Lineage: smb_input1.key SIMPLE [(smb_input)smb_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_input1.value SIMPLE [(smb_input)smb_input.FieldSchema(name:value, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_input2.key SIMPLE [(smb_input)smb_input.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_input2.value SIMPLE [(smb_input)smb_input.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input1
+#### A masked pattern was here ####
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 1000
+10 100 10 1000
+10 1000 10 100
+10 1000 10 100
+10 1000 10 1000
+100 100 100 100
+12 100 12 100
+12 100 12 NULL
+12 NULL 12 100
+12 NULL 12 NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 NULL 10050
+NULL 10050 NULL 35
+NULL 10050 NULL NULL
+NULL 35 NULL 10050
+NULL 35 NULL 35
+NULL 35 NULL NULL
+NULL NULL NULL 10050
+NULL NULL NULL 35
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input1
+#### A masked pattern was here ####
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 1000 10 1000
+100 100 100 100
+12 100 12 100
+12 NULL 12 NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 NULL 10050
+NULL 35 NULL 35
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input1
+#### A masked pattern was here ####
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 1000
+10 100 10 1000
+10 1000 10 100
+10 1000 10 100
+10 1000 10 1000
+100 100 100 100
+12 100 12 100
+12 100 12 NULL
+12 NULL 12 100
+12 NULL 12 NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 NULL 10050
+NULL 10050 NULL 35
+NULL 10050 NULL NULL
+NULL 35 NULL 10050
+NULL 35 NULL 35
+NULL 35 NULL NULL
+NULL NULL NULL 10050
+NULL NULL NULL 35
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input1
+#### A masked pattern was here ####
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 1000
+10 100 10 1000
+10 1000 10 100
+10 1000 10 100
+10 1000 10 1000
+100 100 100 100
+12 100 12 100
+12 100 12 NULL
+12 NULL 12 100
+12 NULL 12 NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 NULL 10050
+NULL 10050 NULL 35
+NULL 10050 NULL NULL
+NULL 35 NULL 10050
+NULL 35 NULL 35
+NULL 35 NULL NULL
+NULL NULL NULL 10050
+NULL NULL NULL 35
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input1
+#### A masked pattern was here ####
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 1000
+10 100 10 1000
+10 1000 10 100
+10 1000 10 100
+10 1000 10 1000
+100 100 100 100
+12 100 12 100
+12 100 12 NULL
+12 NULL 12 100
+12 NULL 12 NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 NULL 10050
+NULL 10050 NULL 35
+NULL 10050 NULL NULL
+NULL 35 NULL 10050
+NULL 35 NULL 35
+NULL 35 NULL NULL
+NULL NULL NULL 10050
+NULL NULL NULL 35
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input1
+PREHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input1
+POSTHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+100 100 10 100
+100 100 10 100
+100 100 100 100
+100 100 12 100
+35 10035 NULL 35
+NULL 10050 12 NULL
+NULL 10050 NULL NULL
+NULL 35 12 NULL
+NULL 35 NULL NULL
+NULL NULL 12 NULL
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input1
+PREHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input1
+POSTHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+100 100 10 100
+100 100 10 100
+100 100 100 100
+100 100 12 100
+35 10035 NULL 35
+NULL 10050 12 NULL
+NULL 10050 NULL NULL
+NULL 35 12 NULL
+NULL 35 NULL NULL
+NULL NULL 12 NULL
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input2 b ON a.key <=> b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input1
+PREHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input2 b ON a.key <=> b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input1
+POSTHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+10 100 NULL NULL
+10 100 NULL NULL
+10 1000 NULL NULL
+100 100 10 100
+100 100 10 100
+100 100 100 100
+100 100 12 100
+12 100 NULL NULL
+12 NULL NULL NULL
+15 10015 NULL NULL
+20 10020 NULL NULL
+25 10025 NULL NULL
+30 10030 NULL NULL
+35 10035 NULL 35
+40 10040 NULL NULL
+40 10040 NULL NULL
+5 10005 NULL NULL
+50 10050 NULL NULL
+50 10050 NULL NULL
+50 10050 NULL NULL
+60 10040 NULL NULL
+60 10040 NULL NULL
+70 10040 NULL NULL
+70 10040 NULL NULL
+80 10040 NULL NULL
+80 10040 NULL NULL
+NULL 10050 12 NULL
+NULL 10050 NULL NULL
+NULL 35 12 NULL
+NULL 35 NULL NULL
+NULL NULL 12 NULL
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input2 b ON a.key <=> b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input1
+PREHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input2 b ON a.key <=> b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input1
+POSTHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+100 100 10 100
+100 100 10 100
+100 100 100 100
+100 100 12 100
+35 10035 NULL 35
+NULL 10050 12 NULL
+NULL 10050 NULL NULL
+NULL 35 12 NULL
+NULL 35 NULL NULL
+NULL NULL 10 1000
+NULL NULL 12 NULL
+NULL NULL 15 10015
+NULL NULL 20 10020
+NULL NULL 25 10025
+NULL NULL 30 10030
+NULL NULL 35 10035
+NULL NULL 40 10040
+NULL NULL 40 10040
+NULL NULL 5 10005
+NULL NULL 50 10050
+NULL NULL 50 10050
+NULL NULL 50 10050
+NULL NULL 60 10040
+NULL NULL 60 10040
+NULL NULL 70 10040
+NULL NULL 70 10040
+NULL NULL 80 10040
+NULL NULL 80 10040
+NULL NULL NULL 10050
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 100 100
+10 100 100 100
+10 100 12 100
+10 100 12 100
+10 1000 10 1000
+100 100 10 100
+100 100 10 100
+100 100 100 100
+100 100 12 100
+12 100 10 100
+12 100 10 100
+12 100 100 100
+12 100 12 100
+12 NULL 12 NULL
+12 NULL NULL NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 NULL 10050
+NULL 35 NULL 35
+NULL NULL 12 NULL
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a RIGHT OUTER JOIN smb_input2 b ON a.value <=> b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a RIGHT OUTER JOIN smb_input2 b ON a.value <=> b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 100 100
+10 100 100 100
+10 100 12 100
+10 100 12 100
+10 1000 10 1000
+100 100 10 100
+100 100 10 100
+100 100 100 100
+100 100 12 100
+12 100 10 100
+12 100 10 100
+12 100 100 100
+12 100 12 100
+12 NULL 12 NULL
+12 NULL NULL NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 NULL 10050
+NULL 35 NULL 35
+NULL NULL 12 NULL
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 100 100
+10 100 100 100
+10 100 12 100
+10 100 12 100
+10 1000 10 1000
+100 100 10 100
+100 100 10 100
+100 100 100 100
+100 100 12 100
+12 100 10 100
+12 100 10 100
+12 100 100 100
+12 100 12 100
+12 NULL 12 NULL
+12 NULL NULL NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 NULL 10050
+NULL 35 NULL 35
+NULL NULL 12 NULL
+NULL NULL NULL NULL
+PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a LEFT OUTER JOIN smb_input2 b ON a.value <=> b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a LEFT OUTER JOIN smb_input2 b ON a.value <=> b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@smb_input2
+#### A masked pattern was here ####
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 10 100
+10 100 100 100
+10 100 100 100
+10 100 12 100
+10 100 12 100
+10 1000 10 1000
+100 100 10 100
+100 100 10 100
+100 100 100 100
+100 100 12 100
+12 100 10 100
+12 100 10 100
+12 100 100 100
+12 100 12 100
+12 NULL 12 NULL
+12 NULL NULL NULL
+15 10015 15 10015
+20 10020 20 10020
+25 10025 25 10025
+30 10030 30 10030
+35 10035 35 10035
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 40 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 60 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 70 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+40 10040 80 10040
+5 10005 5 10005
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 50 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+50 10050 NULL 10050
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 40 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 60 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 70 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+60 10040 80 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 40 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 60 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 70 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+70 10040 80 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 40 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 60 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 70 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+80 10040 80 10040
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 50 10050
+NULL 10050 NULL 10050
+NULL 35 NULL 35
+NULL NULL 12 NULL
+NULL NULL NULL NULL
+PREHOOK: query: --HIVE-3315 join predicate transitive
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL
+PREHOOK: type: QUERY
+POSTHOOK: query: --HIVE-3315 join predicate transitive
+explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is null (type: boolean)
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: null (type: int)
+ sort order: +
+ Map-reduce partition columns: null (type: int)
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: int)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is null (type: boolean)
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: null (type: int)
+ sort order: +
+ Map-reduce partition columns: null (type: int)
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: int)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 null (type: int)
+ 1 null (type: int)
+ nullSafes: [true]
+ outputColumnNames: _col1, _col5
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: null (type: int), _col1 (type: int), _col5 (type: int), null (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL
+PREHOOK: type: QUERY
+PREHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@myinput1
+#### A masked pattern was here ####
+NULL 10 10 NULL
+NULL 10 48 NULL
+NULL 10 NULL NULL
+NULL 35 10 NULL
+NULL 35 48 NULL
+NULL 35 NULL NULL
+NULL NULL 10 NULL
+NULL NULL 48 NULL
+NULL NULL NULL NULL
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/leftsemijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/leftsemijoin.q.out b/ql/src/test/results/clientpositive/llap/leftsemijoin.q.out
new file mode 100644
index 0000000..11f0bb0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/leftsemijoin.q.out
@@ -0,0 +1,114 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+drop table sales
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+drop table sales
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table things
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table things
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE sales (name STRING, id INT)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@sales
+POSTHOOK: query: CREATE TABLE sales (name STRING, id INT)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@sales
+PREHOOK: query: CREATE TABLE things (id INT, name STRING) partitioned by (ds string)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@things
+POSTHOOK: query: CREATE TABLE things (id INT, name STRING) partitioned by (ds string)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@things
+PREHOOK: query: load data local inpath '../../data/files/sales.txt' INTO TABLE sales
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@sales
+POSTHOOK: query: load data local inpath '../../data/files/sales.txt' INTO TABLE sales
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@sales
+PREHOOK: query: load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@things
+POSTHOOK: query: load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@things
+POSTHOOK: Output: default@things@ds=2011-10-23
+PREHOOK: query: load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@things
+POSTHOOK: query: load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@things
+POSTHOOK: Output: default@things@ds=2011-10-24
+PREHOOK: query: SELECT name,id FROM sales
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sales
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT name,id FROM sales
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sales
+#### A masked pattern was here ####
+Hank 2
+Joe 2
+PREHOOK: query: SELECT id,name FROM things
+PREHOOK: type: QUERY
+PREHOOK: Input: default@things
+PREHOOK: Input: default@things@ds=2011-10-23
+PREHOOK: Input: default@things@ds=2011-10-24
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT id,name FROM things
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@things
+POSTHOOK: Input: default@things@ds=2011-10-23
+POSTHOOK: Input: default@things@ds=2011-10-24
+#### A masked pattern was here ####
+2 Tie
+2 Tie
+PREHOOK: query: SELECT name,id FROM sales LEFT SEMI JOIN things ON (sales.id = things.id)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sales
+PREHOOK: Input: default@things
+PREHOOK: Input: default@things@ds=2011-10-23
+PREHOOK: Input: default@things@ds=2011-10-24
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT name,id FROM sales LEFT SEMI JOIN things ON (sales.id = things.id)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sales
+POSTHOOK: Input: default@things
+POSTHOOK: Input: default@things@ds=2011-10-23
+POSTHOOK: Input: default@things@ds=2011-10-24
+#### A masked pattern was here ####
+Hank 2
+Joe 2
+PREHOOK: query: drop table sales
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@sales
+PREHOOK: Output: default@sales
+POSTHOOK: query: drop table sales
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@sales
+POSTHOOK: Output: default@sales
+PREHOOK: query: drop table things
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@things
+PREHOOK: Output: default@things
+POSTHOOK: query: drop table things
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@things
+POSTHOOK: Output: default@things
[21/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
new file mode 100644
index 0000000..423a76a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
@@ -0,0 +1,1477 @@
+PREHOOK: query: -- Hybrid Grace Hash Join
+-- Test n-way join
+SELECT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: -- Hybrid Grace Hash Join
+-- Test n-way join
+SELECT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+1
+PREHOOK: query: -- 3-way mapjoin (1 big table, 2 small tables)
+SELECT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: -- 3-way mapjoin (1 big table, 2 small tables)
+SELECT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+1
+PREHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: z
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ input vertices:
+ 0 Map 1
+ 2 Map 4
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+428
+PREHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: z
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ input vertices:
+ 0 Map 1
+ 2 Map 4
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+428
+PREHOOK: query: -- 4-way mapjoin (1 big table, 3 small tables)
+SELECT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: -- 4-way mapjoin (1 big table, 3 small tables)
+SELECT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+1
+PREHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN srcpart w ON (x.key = w.key)
+JOIN src y ON (y.key = x.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN srcpart w ON (x.key = w.key)
+JOIN src y ON (y.key = x.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE), Map 5 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: z
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ Inner Join 0 to 3
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ 3 key (type: string)
+ input vertices:
+ 0 Map 1
+ 2 Map 4
+ 3 Map 5
+ Statistics: Num rows: 3300 Data size: 35059 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: w
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN srcpart w ON (x.key = w.key)
+JOIN src y ON (y.key = x.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN srcpart w ON (x.key = w.key)
+JOIN src y ON (y.key = x.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+5680
+PREHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN srcpart w ON (x.key = w.key)
+JOIN src y ON (y.key = x.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN srcpart w ON (x.key = w.key)
+JOIN src y ON (y.key = x.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE), Map 5 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: z
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ Inner Join 0 to 3
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ 3 key (type: string)
+ input vertices:
+ 0 Map 1
+ 2 Map 4
+ 3 Map 5
+ Statistics: Num rows: 3300 Data size: 35059 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: w
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN srcpart w ON (x.key = w.key)
+JOIN src y ON (y.key = x.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN srcpart w ON (x.key = w.key)
+JOIN src y ON (y.key = x.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+5680
+PREHOOK: query: -- 2 sets of 3-way mapjoin under 2 different tasks
+SELECT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: -- 2 sets of 3-way mapjoin under 2 different tasks
+SELECT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+1
+PREHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+UNION
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.value = z.value)
+JOIN src y ON (y.value = x.value)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+UNION
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.value = z.value)
+JOIN src y ON (y.value = x.value)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE), Map 6 (BROADCAST_EDGE)
+ Map 8 <- Map 10 (BROADCAST_EDGE), Map 7 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE), Union 4 (CONTAINS)
+ Reducer 5 <- Union 4 (SIMPLE_EDGE)
+ Reducer 9 <- Map 8 (SIMPLE_EDGE), Union 4 (CONTAINS)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 10
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: z
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ input vertices:
+ 0 Map 1
+ 2 Map 6
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 7
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 8
+ Map Operator Tree:
+ TableScan
+ alias: z
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 value (type: string)
+ 1 value (type: string)
+ 2 value (type: string)
+ input vertices:
+ 0 Map 7
+ 2 Map 10
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: bigint)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: bigint)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: bigint)
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reducer 5
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: bigint)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 9
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: bigint)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: bigint)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: bigint)
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Union 4
+ Vertex: Union 4
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+UNION
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.value = z.value)
+JOIN src y ON (y.value = x.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+UNION
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.value = z.value)
+JOIN src y ON (y.value = x.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+428
+452
+PREHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+UNION
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.value = z.value)
+JOIN src y ON (y.value = x.value)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+UNION
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.value = z.value)
+JOIN src y ON (y.value = x.value)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE), Map 6 (BROADCAST_EDGE)
+ Map 8 <- Map 10 (BROADCAST_EDGE), Map 7 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE), Union 4 (CONTAINS)
+ Reducer 5 <- Union 4 (SIMPLE_EDGE)
+ Reducer 9 <- Map 8 (SIMPLE_EDGE), Union 4 (CONTAINS)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 10
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: z
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ input vertices:
+ 0 Map 1
+ 2 Map 6
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 7
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 8
+ Map Operator Tree:
+ TableScan
+ alias: z
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 value (type: string)
+ 1 value (type: string)
+ 2 value (type: string)
+ input vertices:
+ 0 Map 7
+ 2 Map 10
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: bigint)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: bigint)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: bigint)
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reducer 5
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: bigint)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 9
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: bigint)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: bigint)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: bigint)
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Union 4
+ Vertex: Union 4
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+UNION
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.value = z.value)
+JOIN src y ON (y.value = x.value)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.key = z.key)
+JOIN src y ON (y.key = x.key)
+UNION
+SELECT COUNT(*)
+FROM src1 x JOIN srcpart z ON (x.value = z.value)
+JOIN src y ON (y.value = x.value)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+428
+452
+PREHOOK: query: -- A chain of 2 sets of 3-way mapjoin under the same task
+SELECT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: -- A chain of 2 sets of 3-way mapjoin under the same task
+SELECT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+1
+PREHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x
+JOIN srcpart z1 ON (x.key = z1.key)
+JOIN src y1 ON (x.key = y1.key)
+JOIN srcpart z2 ON (x.value = z2.value)
+JOIN src y2 ON (x.value = y2.value)
+WHERE z1.key < 'zzzzzzzz' AND z2.key < 'zzzzzzzzzz'
+ AND y1.value < 'zzzzzzzz' AND y2.value < 'zzzzzzzzzz'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x
+JOIN srcpart z1 ON (x.key = z1.key)
+JOIN src y1 ON (x.key = y1.key)
+JOIN srcpart z2 ON (x.value = z2.value)
+JOIN src y2 ON (x.value = y2.value)
+WHERE z1.key < 'zzzzzzzz' AND z2.key < 'zzzzzzzzzz'
+ AND y1.value < 'zzzzzzzz' AND y2.value < 'zzzzzzzzzz'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE), Map 5 (BROADCAST_EDGE), Map 6 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((key is not null and value is not null) and (value < 'zzzzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: z1
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 'zzzzzzzz') (type: boolean)
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ outputColumnNames: _col1
+ input vertices:
+ 0 Map 1
+ 2 Map 4
+ Statistics: Num rows: 1465 Data size: 15565 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 _col1 (type: string)
+ 1 value (type: string)
+ 2 value (type: string)
+ input vertices:
+ 1 Map 5
+ 2 Map 6
+ Statistics: Num rows: 3223 Data size: 34243 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: y1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((key is not null and (value < 'zzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+ Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: z2
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((value is not null and (key < 'zzzzzzzzzz')) and (value < 'zzzzzzzzzz')) (type: boolean)
+ Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: y2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (value < 'zzzzzzzzzz') (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*)
+FROM src1 x
+JOIN srcpart z1 ON (x.key = z1.key)
+JOIN src y1 ON (x.key = y1.key)
+JOIN srcpart z2 ON (x.value = z2.value)
+JOIN src y2 ON (x.value = y2.value)
+WHERE z1.key < 'zzzzzzzz' AND z2.key < 'zzzzzzzzzz'
+ AND y1.value < 'zzzzzzzz' AND y2.value < 'zzzzzzzzzz'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*)
+FROM src1 x
+JOIN srcpart z1 ON (x.key = z1.key)
+JOIN src y1 ON (x.key = y1.key)
+JOIN srcpart z2 ON (x.value = z2.value)
+JOIN src y2 ON (x.value = y2.value)
+WHERE z1.key < 'zzzzzzzz' AND z2.key < 'zzzzzzzzzz'
+ AND y1.value < 'zzzzzzzz' AND y2.value < 'zzzzzzzzzz'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+18256
+PREHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x
+JOIN srcpart z1 ON (x.key = z1.key)
+JOIN src y1 ON (x.key = y1.key)
+JOIN srcpart z2 ON (x.value = z2.value)
+JOIN src y2 ON (x.value = y2.value)
+WHERE z1.key < 'zzzzzzzz' AND z2.key < 'zzzzzzzzzz'
+ AND y1.value < 'zzzzzzzz' AND y2.value < 'zzzzzzzzzz'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT COUNT(*)
+FROM src1 x
+JOIN srcpart z1 ON (x.key = z1.key)
+JOIN src y1 ON (x.key = y1.key)
+JOIN srcpart z2 ON (x.value = z2.value)
+JOIN src y2 ON (x.value = y2.value)
+WHERE z1.key < 'zzzzzzzz' AND z2.key < 'zzzzzzzzzz'
+ AND y1.value < 'zzzzzzzz' AND y2.value < 'zzzzzzzzzz'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE), Map 5 (BROADCAST_EDGE), Map 6 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((key is not null and value is not null) and (value < 'zzzzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: z1
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 'zzzzzzzz') (type: boolean)
+ Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ outputColumnNames: _col1
+ input vertices:
+ 0 Map 1
+ 2 Map 4
+ Statistics: Num rows: 1465 Data size: 15565 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 _col1 (type: string)
+ 1 value (type: string)
+ 2 value (type: string)
+ input vertices:
+ 1 Map 5
+ 2 Map 6
+ Statistics: Num rows: 3223 Data size: 34243 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: y1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((key is not null and (value < 'zzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+ Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: z2
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ((value is not null and (key < 'zzzzzzzzzz')) and (value < 'zzzzzzzzzz')) (type: boolean)
+ Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: y2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (value < 'zzzzzzzzzz') (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT COUNT(*)
+FROM src1 x
+JOIN srcpart z1 ON (x.key = z1.key)
+JOIN src y1 ON (x.key = y1.key)
+JOIN srcpart z2 ON (x.value = z2.value)
+JOIN src y2 ON (x.value = y2.value)
+WHERE z1.key < 'zzzzzzzz' AND z2.key < 'zzzzzzzzzz'
+ AND y1.value < 'zzzzzzzz' AND y2.value < 'zzzzzzzzzz'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*)
+FROM src1 x
+JOIN srcpart z1 ON (x.key = z1.key)
+JOIN src y1 ON (x.key = y1.key)
+JOIN srcpart z2 ON (x.value = z2.value)
+JOIN src y2 ON (x.value = y2.value)
+WHERE z1.key < 'zzzzzzzz' AND z2.key < 'zzzzzzzzzz'
+ AND y1.value < 'zzzzzzzz' AND y2.value < 'zzzzzzzzzz'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+18256
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_acid_dynamic_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_acid_dynamic_partition.q.out b/ql/src/test/results/clientpositive/llap/insert_acid_dynamic_partition.q.out
new file mode 100644
index 0000000..07eedf3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_acid_dynamic_partition.q.out
@@ -0,0 +1,48 @@
+PREHOOK: query: create table acid_dynamic(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dynamic
+POSTHOOK: query: create table acid_dynamic(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dynamic
+PREHOOK: query: insert into table acid_dynamic partition (ds) select cint, cast(cstring1 as varchar(128)), cstring2 from alltypesorc where cint is not null and cint < 0 order by cint limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dynamic
+POSTHOOK: query: insert into table acid_dynamic partition (ds) select cint, cast(cstring1 as varchar(128)), cstring2 from alltypesorc where cint is not null and cint < 0 order by cint limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dynamic@ds=4KWs6gw7lv2WYd66P
+POSTHOOK: Output: default@acid_dynamic@ds=4hA4KQj2vD3fI6gX82220d
+POSTHOOK: Output: default@acid_dynamic@ds=KbaDXiN85adbHRx58v
+POSTHOOK: Output: default@acid_dynamic@ds=P76636jJ6qM17d7DIy
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4KWs6gw7lv2WYd66P).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4KWs6gw7lv2WYd66P).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4hA4KQj2vD3fI6gX82220d).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4hA4KQj2vD3fI6gX82220d).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=KbaDXiN85adbHRx58v).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=KbaDXiN85adbHRx58v).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=P76636jJ6qM17d7DIy).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=P76636jJ6qM17d7DIy).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_dynamic order by a,b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dynamic
+PREHOOK: Input: default@acid_dynamic@ds=4KWs6gw7lv2WYd66P
+PREHOOK: Input: default@acid_dynamic@ds=4hA4KQj2vD3fI6gX82220d
+PREHOOK: Input: default@acid_dynamic@ds=KbaDXiN85adbHRx58v
+PREHOOK: Input: default@acid_dynamic@ds=P76636jJ6qM17d7DIy
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_dynamic order by a,b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dynamic
+POSTHOOK: Input: default@acid_dynamic@ds=4KWs6gw7lv2WYd66P
+POSTHOOK: Input: default@acid_dynamic@ds=4hA4KQj2vD3fI6gX82220d
+POSTHOOK: Input: default@acid_dynamic@ds=KbaDXiN85adbHRx58v
+POSTHOOK: Input: default@acid_dynamic@ds=P76636jJ6qM17d7DIy
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa P76636jJ6qM17d7DIy
+-1073051226 A34p7oRr2WvUJNf 4hA4KQj2vD3fI6gX82220d
+-1072910839 0iqrc5 KbaDXiN85adbHRx58v
+-1072081801 dPkN74F7 4KWs6gw7lv2WYd66P
+-1072076362 2uLyD28144vklju213J1mr 4KWs6gw7lv2WYd66P
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_acid_not_bucketed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_acid_not_bucketed.q.out b/ql/src/test/results/clientpositive/llap/insert_acid_not_bucketed.q.out
new file mode 100644
index 0000000..985ae40
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_acid_not_bucketed.q.out
@@ -0,0 +1,36 @@
+PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_notbucketed
+PREHOOK: query: insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_notbucketed
+POSTHOOK: Lineage: acid_notbucketed.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_notbucketed.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_notbucketed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_notbucketed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa
+-1073051226 A34p7oRr2WvUJNf
+-1072910839 0iqrc5
+-1072081801 dPkN74F7
+-1072076362 2uLyD28144vklju213J1mr
+-1071480828 aw724t8c5558x2xneC624
+-1071363017 Anj0oF
+-1070883071 0ruyd6Y50JpdGRf6HqD
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/insert_into1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_into1.q.out b/ql/src/test/results/clientpositive/llap/insert_into1.q.out
new file mode 100644
index 0000000..cfbfadc
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/insert_into1.q.out
@@ -0,0 +1,381 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE insert_into1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+DROP TABLE insert_into1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE insert_into1 (key int, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: CREATE TABLE insert_into1 (key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@insert_into1
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 100
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+-1142373758
+PREHOOK: query: explain
+select count(*) from insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+100
+PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 100
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 100
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT INTO TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT INTO TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+-2284747516
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+200
+PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.insert_into1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src ORDER BY key LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@insert_into1
+POSTHOOK: Lineage: insert_into1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: insert_into1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
+ SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+-2693537120
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+10
+PREHOOK: query: DROP TABLE insert_into1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@insert_into1
+PREHOOK: Output: default@insert_into1
+POSTHOOK: query: DROP TABLE insert_into1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@insert_into1
+POSTHOOK: Output: default@insert_into1
[44/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out
new file mode 100644
index 0000000..66869af
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out
@@ -0,0 +1,707 @@
+PREHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: -- Since the leftmost table is assumed as the big table, arrange the tables in the join accordingly
+explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since the leftmost table is assumed as the big table, arrange the tables in the join accordingly
+explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 54 Data size: 5500 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ /bucket_big/ds=2008-04-09 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+38
+PREHOOK: query: -- The mapjoin should fail resulting in the sort-merge join
+explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The mapjoin should fail resulting in the sort-merge join
+explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 54 Data size: 5500 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ /bucket_big/ds=2008-04-09 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+38
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out
new file mode 100644
index 0000000..096e350
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out
@@ -0,0 +1,1014 @@
+PREHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [a]
+ /bucket_small/ds=2008-04-09 [a]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 1 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 0 Map 1
+ Position of Big Table: 1
+ Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [b]
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+38
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ /bucket_small/ds=2008-04-09 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+38
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ /bucket_small/ds=2008-04-09 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+38
[04/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/ptf.q.out b/ql/src/test/results/clientpositive/llap/ptf.q.out
new file mode 100644
index 0000000..59c5def
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/ptf.q.out
@@ -0,0 +1,4895 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+--1. test1
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+--1. test1
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int), p_retailprice (type: double)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noop
+ order by: _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: dense_rank_window_1
+ arguments: _col1
+ name: dense_rank
+ window function: GenericUDAFDenseRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: sum_window_2
+ arguments: _col7
+ name: sum
+ window function: GenericUDAFSumDouble
+ window frame: PRECEDING(MAX)~
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), sum_window_2 (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
+Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001
+Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001
+Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68
+Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38
+Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001
+Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62
+Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68
+Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95
+Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34
+Manufacturer#3 almond antique misty red olive 1 4 4 6195.32
+Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61
+Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67
+Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27
+Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001
+Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69
+Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004
+Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18
+Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66
+PREHOOK: query: -- 2. testJoinWithNoop
+explain
+select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 2. testJoinWithNoop
+explain
+select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: p1
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: p_partkey is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_partkey (type: int)
+ sort order: +
+ Map-reduce partition columns: p_partkey (type: int)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: p2
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: p_partkey is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_partkey (type: int)
+ sort order: +
+ Map-reduce partition columns: p_partkey (type: int)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 p_partkey (type: int)
+ 1 p_partkey (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: j
+ output shape: _col1: string, _col2: string, _col5: int
+ type: SUBQUERY
+ Partition table definition
+ input alias: ptf_1
+ name: noop
+ order by: _col1
+ output shape: _col1: string, _col2: string, _col5: int
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: lag_window_0
+ arguments: _col5, 1, _col5
+ name: lag
+ window function: GenericUDAFLagEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), (_col5 - lag_window_0) (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 0
+Manufacturer#1 almond antique burnished rose metallic 2 0
+Manufacturer#1 almond antique burnished rose metallic 2 0
+Manufacturer#1 almond antique burnished rose metallic 2 0
+Manufacturer#1 almond antique chartreuse lavender yellow 34 32
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 -28
+Manufacturer#1 almond aquamarine burnished black steel 28 22
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 14
+Manufacturer#2 almond antique violet chocolate turquoise 14 0
+Manufacturer#2 almond antique violet turquoise frosted 40 26
+Manufacturer#2 almond aquamarine midnight light salmon 2 -38
+Manufacturer#2 almond aquamarine rose maroon antique 25 23
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 -7
+Manufacturer#3 almond antique chartreuse khaki white 17 0
+Manufacturer#3 almond antique forest lavender goldenrod 14 -3
+Manufacturer#3 almond antique metallic orange dim 19 5
+Manufacturer#3 almond antique misty red olive 1 -18
+Manufacturer#3 almond antique olive coral navajo 45 44
+Manufacturer#4 almond antique gainsboro frosted violet 10 0
+Manufacturer#4 almond antique violet mint lemon 39 29
+Manufacturer#4 almond aquamarine floral ivory bisque 27 -12
+Manufacturer#4 almond aquamarine yellow dodger mint 7 -20
+Manufacturer#4 almond azure aquamarine papaya violet 12 5
+Manufacturer#5 almond antique blue firebrick mint 31 0
+Manufacturer#5 almond antique medium spring khaki 6 -25
+Manufacturer#5 almond antique sky peru orange 2 -4
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 44
+Manufacturer#5 almond azure blanched chiffon midnight 23 -23
+PREHOOK: query: -- 3. testOnlyPTF
+explain
+select p_mfgr, p_name, p_size
+from noop(on part
+partition by p_mfgr
+order by p_name)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3. testOnlyPTF
+explain
+select p_mfgr, p_name, p_size
+from noop(on part
+partition by p_mfgr
+order by p_name)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noop
+ order by: _col1
+ output shape: _col1: string, _col2: string, _col5: int
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size
+from noop(on part
+partition by p_mfgr
+order by p_name)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size
+from noop(on part
+partition by p_mfgr
+order by p_name)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2
+Manufacturer#1 almond antique burnished rose metallic 2
+Manufacturer#1 almond antique chartreuse lavender yellow 34
+Manufacturer#1 almond antique salmon chartreuse burlywood 6
+Manufacturer#1 almond aquamarine burnished black steel 28
+Manufacturer#1 almond aquamarine pink moccasin thistle 42
+Manufacturer#2 almond antique violet chocolate turquoise 14
+Manufacturer#2 almond antique violet turquoise frosted 40
+Manufacturer#2 almond aquamarine midnight light salmon 2
+Manufacturer#2 almond aquamarine rose maroon antique 25
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18
+Manufacturer#3 almond antique chartreuse khaki white 17
+Manufacturer#3 almond antique forest lavender goldenrod 14
+Manufacturer#3 almond antique metallic orange dim 19
+Manufacturer#3 almond antique misty red olive 1
+Manufacturer#3 almond antique olive coral navajo 45
+Manufacturer#4 almond antique gainsboro frosted violet 10
+Manufacturer#4 almond antique violet mint lemon 39
+Manufacturer#4 almond aquamarine floral ivory bisque 27
+Manufacturer#4 almond aquamarine yellow dodger mint 7
+Manufacturer#4 almond azure aquamarine papaya violet 12
+Manufacturer#5 almond antique blue firebrick mint 31
+Manufacturer#5 almond antique medium spring khaki 6
+Manufacturer#5 almond antique sky peru orange 2
+Manufacturer#5 almond aquamarine dodger light gainsboro 46
+Manufacturer#5 almond azure blanched chiffon midnight 23
+PREHOOK: query: -- 4. testPTFAlias
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ ) abc
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 4. testPTFAlias
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ ) abc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int), p_retailprice (type: double)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: TABLE
+ Partition table definition
+ input alias: abc
+ name: noop
+ order by: _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: dense_rank_window_1
+ arguments: _col1
+ name: dense_rank
+ window function: GenericUDAFDenseRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: sum_window_2
+ arguments: _col7
+ name: sum
+ window function: GenericUDAFSumDouble
+ window frame: PRECEDING(MAX)~
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), sum_window_2 (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ ) abc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ ) abc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
+Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001
+Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001
+Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68
+Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38
+Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001
+Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62
+Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68
+Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95
+Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34
+Manufacturer#3 almond antique misty red olive 1 4 4 6195.32
+Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61
+Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67
+Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27
+Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001
+Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69
+Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004
+Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18
+Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66
+PREHOOK: query: -- 5. testPTFAndWhereWithWindowing
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 5. testPTFAndWhereWithWindowing
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noop
+ order by: _col1
+ output shape: _col1: string, _col2: string, _col5: int
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: dense_rank_window_1
+ arguments: _col1
+ name: dense_rank
+ window function: GenericUDAFDenseRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: lag_window_2
+ arguments: _col5, 1, _col5
+ name: lag
+ window function: GenericUDAFLagEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), _col5 (type: int), (_col5 - lag_window_2) (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0
+Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 34 32
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 6 -28
+Manufacturer#1 almond aquamarine burnished black steel 28 5 4 28 22
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 42 14
+Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 14 0
+Manufacturer#2 almond antique violet turquoise frosted 40 2 2 40 26
+Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 2 -38
+Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 25 23
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 18 -7
+Manufacturer#3 almond antique chartreuse khaki white 17 1 1 17 0
+Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 14 -3
+Manufacturer#3 almond antique metallic orange dim 19 3 3 19 5
+Manufacturer#3 almond antique misty red olive 1 4 4 1 -18
+Manufacturer#3 almond antique olive coral navajo 45 5 5 45 44
+Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 10 0
+Manufacturer#4 almond antique violet mint lemon 39 2 2 39 29
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 27 -12
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 7 -20
+Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 12 5
+Manufacturer#5 almond antique blue firebrick mint 31 1 1 31 0
+Manufacturer#5 almond antique medium spring khaki 6 2 2 6 -25
+Manufacturer#5 almond antique sky peru orange 2 3 3 2 -4
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 46 44
+Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 23 -23
+PREHOOK: query: -- 6. testSWQAndPTFAndGBy
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+group by p_mfgr, p_name, p_size
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 6. testSWQAndPTFAndGBy
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+group by p_mfgr, p_name, p_size
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noop
+ order by: _col1
+ output shape: _col1: string, _col2: string, _col5: int
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int)
+ outputColumnNames: _col2, _col1, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col2 (type: string), _col1 (type: string), _col5 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
+ sort order: +++
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col0: string, _col1: string, _col2: int
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col0
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: dense_rank_window_1
+ arguments: _col1
+ name: dense_rank
+ window function: GenericUDAFDenseRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: lag_window_2
+ arguments: _col2, 1, _col2
+ name: lag
+ window function: GenericUDAFLagEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), _col2 (type: int), (_col2 - lag_window_2) (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+group by p_mfgr, p_name, p_size
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noop(on part
+ partition by p_mfgr
+ order by p_name
+ )
+group by p_mfgr, p_name, p_size
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 2 0
+Manufacturer#1 almond antique chartreuse lavender yellow 34 2 2 34 32
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 3 3 6 -28
+Manufacturer#1 almond aquamarine burnished black steel 28 4 4 28 22
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 5 5 42 14
+Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 14 0
+Manufacturer#2 almond antique violet turquoise frosted 40 2 2 40 26
+Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 2 -38
+Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 25 23
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 18 -7
+Manufacturer#3 almond antique chartreuse khaki white 17 1 1 17 0
+Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 14 -3
+Manufacturer#3 almond antique metallic orange dim 19 3 3 19 5
+Manufacturer#3 almond antique misty red olive 1 4 4 1 -18
+Manufacturer#3 almond antique olive coral navajo 45 5 5 45 44
+Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 10 0
+Manufacturer#4 almond antique violet mint lemon 39 2 2 39 29
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 27 -12
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 7 -20
+Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 12 5
+Manufacturer#5 almond antique blue firebrick mint 31 1 1 31 0
+Manufacturer#5 almond antique medium spring khaki 6 2 2 6 -25
+Manufacturer#5 almond antique sky peru orange 2 3 3 2 -4
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 46 44
+Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 23 -23
+PREHOOK: query: -- 7. testJoin
+explain
+select abc.*
+from noop(on part
+partition by p_mfgr
+order by p_name
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 7. testJoin
+explain
+select abc.*
+from noop(on part
+partition by p_mfgr
+order by p_name
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Map 4 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_partkey (type: int), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: p1
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: p_partkey is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_partkey (type: int)
+ sort order: +
+ Map-reduce partition columns: p_partkey (type: int)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: double), VALUE._col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string
+ type: TABLE
+ Partition table definition
+ input alias: abc
+ name: noop
+ order by: _col1
+ output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: _col0 is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 p_partkey (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select abc.*
+from noop(on part
+partition by p_mfgr
+order by p_name
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select abc.*
+from noop(on part
+partition by p_mfgr
+order by p_name
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+105685 almond antique violet chocolate turquoise Manufacturer#2 Brand#22 MEDIUM ANODIZED COPPER 14 MED CAN 1690.68 ly pending requ
+110592 almond antique salmon chartreuse burlywood Manufacturer#1 Brand#15 PROMO BURNISHED NICKEL 6 JUMBO PKG 1602.59 to the furiously
+112398 almond antique metallic orange dim Manufacturer#3 Brand#32 MEDIUM BURNISHED BRASS 19 JUMBO JAR 1410.39 ole car
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+132666 almond aquamarine rose maroon antique Manufacturer#2 Brand#24 SMALL POLISHED NICKEL 25 MED BOX 1698.66 even
+144293 almond antique olive coral navajo Manufacturer#3 Brand#34 STANDARD POLISHED STEEL 45 JUMBO CAN 1337.29 ag furiously about
+146985 almond aquamarine midnight light salmon Manufacturer#2 Brand#23 MEDIUM BURNISHED COPPER 2 SM CASE 2031.98 s cajole caref
+15103 almond aquamarine dodger light gainsboro Manufacturer#5 Brand#53 ECONOMY BURNISHED STEEL 46 LG PACK 1018.1 packages hinder carefu
+155733 almond antique sky peru orange Manufacturer#5 Brand#53 SMALL PLATED BRASS 2 WRAP DRUM 1788.73 furiously. bra
+17273 almond antique forest lavender goldenrod Manufacturer#3 Brand#35 PROMO ANODIZED TIN 14 JUMBO CASE 1190.27 along the
+17927 almond aquamarine yellow dodger mint Manufacturer#4 Brand#41 ECONOMY BRUSHED COPPER 7 SM PKG 1844.92 ites. eve
+191709 almond antique violet turquoise frosted Manufacturer#2 Brand#22 ECONOMY POLISHED STEEL 40 MED BOX 1800.7 haggle
+192697 almond antique blue firebrick mint Manufacturer#5 Brand#52 MEDIUM BURNISHED TIN 31 LG DRUM 1789.69 ickly ir
+195606 almond aquamarine sandy cyan gainsboro Manufacturer#2 Brand#25 STANDARD PLATED TIN 18 SM PKG 1701.6 ic de
+33357 almond azure aquamarine papaya violet Manufacturer#4 Brand#41 STANDARD ANODIZED TIN 12 WRAP CASE 1290.35 reful
+40982 almond antique misty red olive Manufacturer#3 Brand#32 ECONOMY PLATED COPPER 1 LG PKG 1922.98 c foxes can s
+42669 almond antique medium spring khaki Manufacturer#5 Brand#51 STANDARD BURNISHED TIN 6 MED CAN 1611.66 sits haggl
+45261 almond aquamarine floral ivory bisque Manufacturer#4 Brand#42 SMALL PLATED STEEL 27 WRAP CASE 1206.26 careful
+48427 almond antique violet mint lemon Manufacturer#4 Brand#42 PROMO POLISHED STEEL 39 SM CASE 1375.42 hely ironic i
+49671 almond antique gainsboro frosted violet Manufacturer#4 Brand#41 SMALL BRUSHED BRASS 10 SM BOX 1620.67 ccounts run quick
+65667 almond aquamarine pink moccasin thistle Manufacturer#1 Brand#12 LARGE BURNISHED STEEL 42 JUMBO CASE 1632.66 e across the expr
+78486 almond azure blanched chiffon midnight Manufacturer#5 Brand#52 LARGE BRUSHED BRASS 23 MED BAG 1464.48 hely blith
+85768 almond antique chartreuse lavender yellow Manufacturer#1 Brand#12 LARGE BRUSHED STEEL 34 SM BAG 1753.76 refull
+86428 almond aquamarine burnished black steel Manufacturer#1 Brand#12 STANDARD ANODIZED STEEL 28 WRAP BAG 1414.42 arefully
+90681 almond antique chartreuse khaki white Manufacturer#3 Brand#31 MEDIUM BURNISHED TIN 17 SM CASE 1671.68 are slyly after the sl
+PREHOOK: query: -- 8. testJoinRight
+explain
+select abc.*
+from part p1 join noop(on part
+partition by p_mfgr
+order by p_name
+) abc on abc.p_partkey = p1.p_partkey
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 8. testJoinRight
+explain
+select abc.*
+from part p1 join noop(on part
+partition by p_mfgr
+order by p_name
+) abc on abc.p_partkey = p1.p_partkey
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE)
+ Reducer 4 <- Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: p1
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: p_partkey is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_partkey (type: int)
+ sort order: +
+ Map-reduce partition columns: p_partkey (type: int)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_partkey (type: int), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 p_partkey (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 4
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: double), VALUE._col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string
+ type: TABLE
+ Partition table definition
+ input alias: abc
+ name: noop
+ order by: _col1
+ output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: _col0 is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select abc.*
+from part p1 join noop(on part
+partition by p_mfgr
+order by p_name
+) abc on abc.p_partkey = p1.p_partkey
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select abc.*
+from part p1 join noop(on part
+partition by p_mfgr
+order by p_name
+) abc on abc.p_partkey = p1.p_partkey
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+105685 almond antique violet chocolate turquoise Manufacturer#2 Brand#22 MEDIUM ANODIZED COPPER 14 MED CAN 1690.68 ly pending requ
+110592 almond antique salmon chartreuse burlywood Manufacturer#1 Brand#15 PROMO BURNISHED NICKEL 6 JUMBO PKG 1602.59 to the furiously
+112398 almond antique metallic orange dim Manufacturer#3 Brand#32 MEDIUM BURNISHED BRASS 19 JUMBO JAR 1410.39 ole car
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+132666 almond aquamarine rose maroon antique Manufacturer#2 Brand#24 SMALL POLISHED NICKEL 25 MED BOX 1698.66 even
+144293 almond antique olive coral navajo Manufacturer#3 Brand#34 STANDARD POLISHED STEEL 45 JUMBO CAN 1337.29 ag furiously about
+146985 almond aquamarine midnight light salmon Manufacturer#2 Brand#23 MEDIUM BURNISHED COPPER 2 SM CASE 2031.98 s cajole caref
+15103 almond aquamarine dodger light gainsboro Manufacturer#5 Brand#53 ECONOMY BURNISHED STEEL 46 LG PACK 1018.1 packages hinder carefu
+155733 almond antique sky peru orange Manufacturer#5 Brand#53 SMALL PLATED BRASS 2 WRAP DRUM 1788.73 furiously. bra
+17273 almond antique forest lavender goldenrod Manufacturer#3 Brand#35 PROMO ANODIZED TIN 14 JUMBO CASE 1190.27 along the
+17927 almond aquamarine yellow dodger mint Manufacturer#4 Brand#41 ECONOMY BRUSHED COPPER 7 SM PKG 1844.92 ites. eve
+191709 almond antique violet turquoise frosted Manufacturer#2 Brand#22 ECONOMY POLISHED STEEL 40 MED BOX 1800.7 haggle
+192697 almond antique blue firebrick mint Manufacturer#5 Brand#52 MEDIUM BURNISHED TIN 31 LG DRUM 1789.69 ickly ir
+195606 almond aquamarine sandy cyan gainsboro Manufacturer#2 Brand#25 STANDARD PLATED TIN 18 SM PKG 1701.6 ic de
+33357 almond azure aquamarine papaya violet Manufacturer#4 Brand#41 STANDARD ANODIZED TIN 12 WRAP CASE 1290.35 reful
+40982 almond antique misty red olive Manufacturer#3 Brand#32 ECONOMY PLATED COPPER 1 LG PKG 1922.98 c foxes can s
+42669 almond antique medium spring khaki Manufacturer#5 Brand#51 STANDARD BURNISHED TIN 6 MED CAN 1611.66 sits haggl
+45261 almond aquamarine floral ivory bisque Manufacturer#4 Brand#42 SMALL PLATED STEEL 27 WRAP CASE 1206.26 careful
+48427 almond antique violet mint lemon Manufacturer#4 Brand#42 PROMO POLISHED STEEL 39 SM CASE 1375.42 hely ironic i
+49671 almond antique gainsboro frosted violet Manufacturer#4 Brand#41 SMALL BRUSHED BRASS 10 SM BOX 1620.67 ccounts run quick
+65667 almond aquamarine pink moccasin thistle Manufacturer#1 Brand#12 LARGE BURNISHED STEEL 42 JUMBO CASE 1632.66 e across the expr
+78486 almond azure blanched chiffon midnight Manufacturer#5 Brand#52 LARGE BRUSHED BRASS 23 MED BAG 1464.48 hely blith
+85768 almond antique chartreuse lavender yellow Manufacturer#1 Brand#12 LARGE BRUSHED STEEL 34 SM BAG 1753.76 refull
+86428 almond aquamarine burnished black steel Manufacturer#1 Brand#12 STANDARD ANODIZED STEEL 28 WRAP BAG 1414.42 arefully
+90681 almond antique chartreuse khaki white Manufacturer#3 Brand#31 MEDIUM BURNISHED TIN 17 SM CASE 1671.68 are slyly after the sl
+PREHOOK: query: -- 9. testNoopWithMap
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmap(on part
+partition by p_mfgr
+order by p_name, p_size desc)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 9. testNoopWithMap
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmap(on part
+partition by p_mfgr
+order by p_name, p_size desc)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: p_name: string, p_mfgr: string, p_size: int
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmap
+ order by: p_name, p_size(DESC)
+ output shape: p_name: string, p_mfgr: string, p_size: int
+ partition by: p_mfgr
+ raw input shape:
+ transforms raw input: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Map-side function: true
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string), p_size (type: int)
+ sort order: ++-
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey2 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmap
+ order by: _col1, _col5(DESC)
+ output shape: _col1: string, _col2: string, _col5: int
+ partition by: _col2
+ raw input shape:
+ transforms raw input: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int)
+ sort order: ++-
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey2 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1, _col5(DESC)
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1, _col5
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmap(on part
+partition by p_mfgr
+order by p_name, p_size desc)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmap(on part
+partition by p_mfgr
+order by p_name, p_size desc)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1
+Manufacturer#1 almond antique burnished rose metallic 2 1
+Manufacturer#1 almond antique chartreuse lavender yellow 34 3
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 4
+Manufacturer#1 almond aquamarine burnished black steel 28 5
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 6
+Manufacturer#2 almond antique violet chocolate turquoise 14 1
+Manufacturer#2 almond antique violet turquoise frosted 40 2
+Manufacturer#2 almond aquamarine midnight light salmon 2 3
+Manufacturer#2 almond aquamarine rose maroon antique 25 4
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5
+Manufacturer#3 almond antique chartreuse khaki white 17 1
+Manufacturer#3 almond antique forest lavender goldenrod 14 2
+Manufacturer#3 almond antique metallic orange dim 19 3
+Manufacturer#3 almond antique misty red olive 1 4
+Manufacturer#3 almond antique olive coral navajo 45 5
+Manufacturer#4 almond antique gainsboro frosted violet 10 1
+Manufacturer#4 almond antique violet mint lemon 39 2
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4
+Manufacturer#4 almond azure aquamarine papaya violet 12 5
+Manufacturer#5 almond antique blue firebrick mint 31 1
+Manufacturer#5 almond antique medium spring khaki 6 2
+Manufacturer#5 almond antique sky peru orange 2 3
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4
+Manufacturer#5 almond azure blanched chiffon midnight 23 5
+PREHOOK: query: -- 10. testNoopWithMapWithWindowing
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmap(on part
+ partition by p_mfgr
+ order by p_name)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 10. testNoopWithMapWithWindowing
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmap(on part
+ partition by p_mfgr
+ order by p_name)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmap
+ order by: p_name
+ output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double
+ partition by: p_mfgr
+ raw input shape:
+ transforms raw input: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Map-side function: true
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int), p_retailprice (type: double)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmap
+ order by: _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ transforms raw input: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: dense_rank_window_1
+ arguments: _col1
+ name: dense_rank
+ window function: GenericUDAFDenseRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: sum_window_2
+ arguments: _col7
+ name: sum
+ window function: GenericUDAFSumDouble
+ window frame: PRECEDING(MAX)~
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), sum_window_2 (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmap(on part
+ partition by p_mfgr
+ order by p_name)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmap(on part
+ partition by p_mfgr
+ order by p_name)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
+Manufacturer#1 al
<TRUNCATED>
[38/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
new file mode 100644
index 0000000..4699e10
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
@@ -0,0 +1,1606 @@
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab_part
+POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab_part
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab
+POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (CUSTOM_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col7
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select count(*)
+from
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (BROADCAST_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: tab_part
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: key (type: int), value (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: int), KEY._col1 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 key (type: int)
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*)
+from
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from
+(select distinct key, value from tab_part) a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+242
+PREHOOK: query: -- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table.
+-- In this case the sub-query is chosen as the big table.
+explain
+select a.k1, a.v1, b.value
+from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
+join tab b on a.k1 = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table.
+-- In this case the sub-query is chosen as the big table.
+explain
+select a.k1, a.v1, b.value
+from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
+join tab b on a.k1 = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 3 <- Reducer 2 (CUSTOM_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcbucket_mapjoin
+ Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 14 Data size: 1456 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(substr(value, 5))
+ keys: key (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 14 Data size: 1456 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 14 Data size: 1456 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: double)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col1 (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col3
+ input vertices:
+ 0 Reducer 2
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 7 Data size: 728 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: double), _col0 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 7 Data size: 728 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col1 (type: int)
+ Statistics: Num rows: 7 Data size: 728 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select a.k1, a.v1, b.value
+from (select sum(substr(tab.value,5)) as v1, key as k1 from tab_part join tab on tab_part.key = tab.key GROUP BY tab.key) a
+join tab b on a.k1 = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.k1, a.v1, b.value
+from (select sum(substr(tab.value,5)) as v1, key as k1 from tab_part join tab on tab_part.key = tab.key GROUP BY tab.key) a
+join tab b on a.k1 = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (CUSTOM_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: tab_part
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col6, _col7
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: sum(substr(_col7, 5))
+ keys: _col6 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: double)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: tab
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: double), _col0 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col1 (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col3
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select a.k1, a.v1, b.value
+from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab x join tab y on x.key = y.key GROUP BY x.key) a
+join tab_part b on a.k1 = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.k1, a.v1, b.value
+from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab x join tab y on x.key = y.key GROUP BY x.key) a
+join tab_part b on a.k1 = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (CUSTOM_EDGE)
+ Map 4 <- Reducer 2 (CUSTOM_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: sum(substr(_col1, 5))
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: double)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col1 (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col3
+ input vertices:
+ 0 Reducer 2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 66 Data size: 700 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: double), _col0 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 66 Data size: 700 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col1 (type: int)
+ Statistics: Num rows: 66 Data size: 700 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: double)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- multi-way join
+explain
+select a.key, a.value, b.value
+from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- multi-way join
+explain
+select a.key, a.value, b.value
+from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (CUSTOM_EDGE), Map 3 (CUSTOM_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ 2 key (type: int)
+ outputColumnNames: _col0, _col1, _col7
+ input vertices:
+ 1 Map 2
+ 2 Map 3
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select a.key, a.value, c.value
+from (select x.key, x.value from tab_part x join tab y on x.key = y.key) a join tab c on a.key = c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.key, a.value, c.value
+from (select x.key, x.value from tab_part x join tab y on x.key = y.key) a join tab c on a.key = c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (CUSTOM_EDGE), Map 3 (CUSTOM_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col3
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- in this case sub-query is the small table
+explain
+select a.key, a.value, b.value
+from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
+join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- in this case sub-query is the small table
+explain
+select a.key, a.value, b.value
+from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
+join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 3 <- Reducer 2 (CUSTOM_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcbucket_mapjoin
+ Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 14 Data size: 1456 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(substr(value, 5))
+ keys: key (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 14 Data size: 1456 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 14 Data size: 1456 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: double)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col3
+ input vertices:
+ 0 Reducer 2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 7 Data size: 728 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 7 Data size: 728 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: double)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select a.key, a.value, b.value
+from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
+join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.key, a.value, b.value
+from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
+join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 3 <- Reducer 2 (CUSTOM_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcbucket_mapjoin
+ Statistics: Num rows: 27 Data size: 2808 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 14 Data size: 1456 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 14 Data size: 1456 Basic stats: COMPLETE Column stats: NONE
+ value expressions: substr(value, 5) (type: string)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col3
+ input vertices:
+ 0 Reducer 2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: complete
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 7 Data size: 728 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 7 Data size: 728 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: double)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- join on non-bucketed column results in broadcast join.
+explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.value = b.value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- join on non-bucketed column results in broadcast join.
+explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.value = b.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: int)
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 value (type: string)
+ 1 value (type: string)
+ outputColumnNames: _col0, _col1, _col7
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: CREATE TABLE tab1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab1
+POSTHOOK: query: CREATE TABLE tab1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab1
+PREHOOK: query: insert overwrite table tab1
+select key,value from srcbucket_mapjoin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab1
+POSTHOOK: query: insert overwrite table tab1
+select key,value from srcbucket_mapjoin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab1
+POSTHOOK: Lineage: tab1.key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab1.value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select a.key, a.value, b.value
+from tab1 a join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.key, a.value, b.value
+from tab1 a join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (CUSTOM_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1, _col6
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (CUSTOM_EDGE), Map 3 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key is not null and value is not null) (type: boolean)
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col1 (type: string)
+ 1 value (type: string)
+ outputColumnNames: _col0, _col12
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col12 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: int)
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.key = b.key and a.ds = b.ds
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.key = b.key and a.ds = b.ds
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int), ds (type: string)
+ sort order: ++
+ Map-reduce partition columns: key (type: int), ds (type: string)
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Select Operator
+ expressions: ds (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: b
+ Partition key expr: ds
+ Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE
+ Target column: ds
+ Target Vertex: Map 2
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int), ds (type: string)
+ 1 key (type: int), ds (type: string)
+ outputColumnNames: _col0, _col1, _col7
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (CUSTOM_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count()
+ mode: complete
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+1
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin where key = 411
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (CUSTOM_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count()
+ mode: complete
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from tab_part a join tab b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+1
[46/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out
new file mode 100644
index 0000000..ae1a8d2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out
@@ -0,0 +1,369 @@
+PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl1
+PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl2
+PREHOOK: query: insert overwrite table tbl1
+select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: insert overwrite table tbl1
+select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl1
+POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tbl2
+select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: insert overwrite table tbl2
+select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl2
+POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- One of the subqueries contains a union, so it should not be converted to a sort-merge join.
+explain
+select count(*) from
+ (
+ select * from
+ (select a.key as key, a.value as value from tbl1 a where key < 6
+ union all
+ select a.key as key, a.value as value from tbl1 a where key < 6
+ ) usubq1 ) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- One of the subqueries contains a union, so it should not be converted to a sort-merge join.
+explain
+select count(*) from
+ (
+ select * from
+ (select a.key as key, a.value as value from tbl1 a where key < 6
+ union all
+ select a.key as key, a.value as value from tbl1 a where key < 6
+ ) usubq1 ) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 5 (BROADCAST_EDGE), Union 2 (CONTAINS)
+ Map 4 <- Map 5 (BROADCAST_EDGE), Union 2 (CONTAINS)
+ Reducer 3 <- Union 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 5
+ Statistics: Num rows: 6 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 5
+ Statistics: Num rows: 6 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Union 2
+ Vertex: Union 2
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+ (
+ select * from
+ (select a.key as key, a.value as value from tbl1 a where key < 6
+ union all
+ select a.key as key, a.value as value from tbl1 a where key < 6
+ ) usubq1 ) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+ (
+ select * from
+ (select a.key as key, a.value as value from tbl1 a where key < 6
+ union all
+ select a.key as key, a.value as value from tbl1 a where key < 6
+ ) usubq1 ) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+40
+PREHOOK: query: -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join.
+explain
+select count(*) from
+ (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join.
+explain
+select count(*) from
+ (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 3 <- Reducer 2 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 4 <- Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ bucketGroup: true
+ keys: key (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 0 Reducer 2
+ Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+ (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+ (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+8
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out
new file mode 100644
index 0000000..3c50dac
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out
@@ -0,0 +1,1485 @@
+PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket
+
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket
+
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [a]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 1 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 0 Map 1
+ Position of Big Table: 1
+ Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [b]
+ /bucket_big/ds=2008-04-09 [b]
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+38
+PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+-- The tables are only bucketed and not sorted, the join should not be converted
+-- Currenly, a join is only converted to a sort-merge join without a hint, automatic conversion to
+-- bucketized mapjoin is not done
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+-- The tables are only bucketed and not sorted, the join should not be converted
+-- Currenly, a join is only converted to a sort-merge join without a hint, automatic conversion to
+-- bucketized mapjoin is not done
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [a]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 1 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 0 Map 1
+ Position of Big Table: 1
+ Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [b]
+ /bucket_big/ds=2008-04-09 [b]
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+38
+PREHOOK: query: -- The join is converted to a bucketed mapjoin with a mapjoin hint
+explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is converted to a bucketed mapjoin with a mapjoin hint
+explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_HINTLIST
+ TOK_HINT
+ TOK_MAPJOIN
+ TOK_HINTARGLIST
+ a
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [a]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 1 => 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 0 Map 1
+ Position of Big Table: 1
+ Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [b]
+ /bucket_big/ds=2008-04-09 [b]
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+38
+PREHOOK: query: -- HIVE-7023
+explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- HIVE-7023
+explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ c
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ c
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_HINTLIST
+ TOK_HINT
+ TOK_MAPJOIN
+ TOK_HINTARGLIST
+ a
+ b
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 114
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [a]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ Estimated key counts: Map 1 => 1, Map 4 => 58
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ 2 key (type: string)
+ input vertices:
+ 0 Map 1
+ 2 Map 4
+ Position of Big Table: 1
+ Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [b]
+ /bucket_big/ds=2008-04-09 [b]
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ tag: 2
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [c]
+ /bucket_big/ds=2008-04-09 [c]
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+180
[10/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/metadata_only_queries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/metadata_only_queries.q.out b/ql/src/test/results/clientpositive/llap/metadata_only_queries.q.out
new file mode 100644
index 0000000..bacbff3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/metadata_only_queries.q.out
@@ -0,0 +1,504 @@
+PREHOOK: query: create table over10k(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary)
+ row format delimited
+ fields terminated by '|'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over10k
+POSTHOOK: query: create table over10k(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary)
+ row format delimited
+ fields terminated by '|'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over10k
+PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@over10k
+POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@over10k
+PREHOOK: query: create table stats_tbl(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_tbl
+POSTHOOK: query: create table stats_tbl(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_tbl
+PREHOOK: query: create table stats_tbl_part(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary) partitioned by (dt string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_tbl_part
+POSTHOOK: query: create table stats_tbl_part(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary) partitioned by (dt string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_tbl_part
+PREHOOK: query: insert overwrite table stats_tbl select * from over10k
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+PREHOOK: Output: default@stats_tbl
+POSTHOOK: query: insert overwrite table stats_tbl select * from over10k
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+POSTHOOK: Output: default@stats_tbl
+POSTHOOK: Lineage: stats_tbl.b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl.bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: stats_tbl.bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: stats_tbl.d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: stats_tbl.dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: stats_tbl.f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: stats_tbl.i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_tbl.s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: stats_tbl.si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl.t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl.ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+PREHOOK: query: insert into table stats_tbl_part partition (dt='2010') select * from over10k where t>0 and t<30
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+PREHOOK: Output: default@stats_tbl_part@dt=2010
+POSTHOOK: query: insert into table stats_tbl_part partition (dt='2010') select * from over10k where t>0 and t<30
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+POSTHOOK: Output: default@stats_tbl_part@dt=2010
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2010).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+PREHOOK: query: insert into table stats_tbl_part partition (dt='2011') select * from over10k where t>30 and t<60
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+PREHOOK: Output: default@stats_tbl_part@dt=2011
+POSTHOOK: query: insert into table stats_tbl_part partition (dt='2011') select * from over10k where t>30 and t<60
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+POSTHOOK: Output: default@stats_tbl_part@dt=2011
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2011).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+PREHOOK: query: insert into table stats_tbl_part partition (dt='2012') select * from over10k where t>60
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+PREHOOK: Output: default@stats_tbl_part@dt=2012
+POSTHOOK: query: insert into table stats_tbl_part partition (dt='2012') select * from over10k where t>60
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+POSTHOOK: Output: default@stats_tbl_part@dt=2012
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).b SIMPLE [(over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bin SIMPLE [(over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).bo SIMPLE [(over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).d SIMPLE [(over10k)over10k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).dec SIMPLE [(over10k)over10k.FieldSchema(name:dec, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).f SIMPLE [(over10k)over10k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).i SIMPLE [(over10k)over10k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).s SIMPLE [(over10k)over10k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).si SIMPLE [(over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).t SIMPLE [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: stats_tbl_part PARTITION(dt=2012).ts SIMPLE [(over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+PREHOOK: query: explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: stats_tbl
+ Statistics: Num rows: 9999 Data size: 1030908 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: s (type: string), bo (type: boolean), bin (type: binary), si (type: smallint), i (type: int), b (type: bigint)
+ outputColumnNames: _col2, _col3, _col4, _col5, _col6, _col7
+ Statistics: Num rows: 9999 Data size: 1030908 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(), sum(1), sum(0.2), count(1), count(_col2), count(_col3), count(_col4), count(_col5), max(_col6), min(_col7)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: double), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: int), _col9 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2), count(VALUE._col3), count(VALUE._col4), count(VALUE._col5), count(VALUE._col6), count(VALUE._col7), max(VALUE._col8), min(VALUE._col9)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: stats_tbl_part
+ Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: s (type: string), bo (type: boolean), bin (type: binary), si (type: smallint), i (type: int), b (type: bigint)
+ outputColumnNames: _col2, _col3, _col4, _col5, _col6, _col7
+ Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(), sum(1), sum(0.2), count(1), count(_col2), count(_col3), count(_col4), count(_col5), max(_col6), min(_col7)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: double), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: int), _col9 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2), count(VALUE._col3), count(VALUE._col4), count(VALUE._col5), count(VALUE._col6), count(VALUE._col7), max(VALUE._col8), min(VALUE._col9)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: analyze table stats_tbl compute statistics for columns t,si,i,b,f,d,bo,s,bin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table stats_tbl compute statistics for columns t,si,i,b,f,d,bo,s,bin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
+#### A masked pattern was here ####
+PREHOOK: query: analyze table stats_tbl_part partition(dt='2010') compute statistics for columns t,si,i,b,f,d,bo,s,bin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
+PREHOOK: Input: default@stats_tbl_part@dt=2010
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table stats_tbl_part partition(dt='2010') compute statistics for columns t,si,i,b,f,d,bo,s,bin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
+POSTHOOK: Input: default@stats_tbl_part@dt=2010
+#### A masked pattern was here ####
+PREHOOK: query: analyze table stats_tbl_part partition(dt='2011') compute statistics for columns t,si,i,b,f,d,bo,s,bin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
+PREHOOK: Input: default@stats_tbl_part@dt=2011
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table stats_tbl_part partition(dt='2011') compute statistics for columns t,si,i,b,f,d,bo,s,bin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
+POSTHOOK: Input: default@stats_tbl_part@dt=2011
+#### A masked pattern was here ####
+PREHOOK: query: analyze table stats_tbl_part partition(dt='2012') compute statistics for columns t,si,i,b,f,d,bo,s,bin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
+PREHOOK: Input: default@stats_tbl_part@dt=2012
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table stats_tbl_part partition(dt='2012') compute statistics for columns t,si,i,b,f,d,bo,s,bin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
+POSTHOOK: Input: default@stats_tbl_part@dt=2012
+#### A masked pattern was here ####
+PREHOOK: query: explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
+#### A masked pattern was here ####
+9999 9999 1999.8000000000002 9999 9999 9999 9999 9999
+PREHOOK: query: explain
+select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
+#### A masked pattern was here ####
+POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
+#### A masked pattern was here ####
+65536 65791 4294967296 4294967551 0.01 99.98 0.01 50.0
+PREHOOK: query: explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
+#### A masked pattern was here ####
+9489 9489 1897.8000000000002 9489 9489 9489 9489 9489
+PREHOOK: query: explain
+select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: 1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
+#### A masked pattern was here ####
+POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
+#### A masked pattern was here ####
+65536 65791 4294967296 4294967551 0.01 99.98 0.01 50.0
+PREHOOK: query: explain select count(ts) from stats_tbl_part
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(ts) from stats_tbl_part
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: stats_tbl_part
+ Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ts (type: timestamp)
+ outputColumnNames: _col0
+ Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col0)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: drop table stats_tbl
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@stats_tbl
+PREHOOK: Output: default@stats_tbl
+POSTHOOK: query: drop table stats_tbl
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@stats_tbl
+POSTHOOK: Output: default@stats_tbl
+PREHOOK: query: drop table stats_tbl_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@stats_tbl_part
+PREHOOK: Output: default@stats_tbl_part
+POSTHOOK: query: drop table stats_tbl_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@stats_tbl_part
+POSTHOOK: Output: default@stats_tbl_part
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/mrr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mrr.q.out b/ql/src/test/results/clientpositive/llap/mrr.q.out
new file mode 100644
index 0000000..f507e4b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/mrr.q.out
@@ -0,0 +1,2311 @@
+PREHOOK: query: -- simple query with multiple reduce stages
+-- SORT_QUERY_RESULTS
+
+EXPLAIN SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt
+PREHOOK: type: QUERY
+POSTHOOK: query: -- simple query with multiple reduce stages
+-- SORT_QUERY_RESULTS
+
+EXPLAIN SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: bigint)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0 3
+10 1
+100 2
+103 2
+104 2
+105 1
+11 1
+111 1
+113 2
+114 1
+116 1
+118 2
+119 3
+12 2
+120 2
+125 2
+126 1
+128 3
+129 2
+131 1
+133 1
+134 2
+136 1
+137 2
+138 4
+143 1
+145 1
+146 2
+149 2
+15 2
+150 1
+152 2
+153 1
+155 1
+156 1
+157 1
+158 1
+160 1
+162 1
+163 1
+164 2
+165 2
+166 1
+167 3
+168 1
+169 4
+17 1
+170 1
+172 2
+174 2
+175 2
+176 2
+177 1
+178 1
+179 2
+18 2
+180 1
+181 1
+183 1
+186 1
+187 3
+189 1
+19 1
+190 1
+191 2
+192 1
+193 3
+194 1
+195 2
+196 1
+197 2
+199 3
+2 1
+20 1
+200 2
+201 1
+202 1
+203 2
+205 2
+207 2
+208 3
+209 2
+213 2
+214 1
+216 2
+217 2
+218 1
+219 2
+221 2
+222 1
+223 2
+224 2
+226 1
+228 1
+229 2
+230 5
+233 2
+235 1
+237 2
+238 2
+239 2
+24 2
+241 1
+242 2
+244 1
+247 1
+248 1
+249 1
+252 1
+255 2
+256 2
+257 1
+258 1
+26 2
+260 1
+262 1
+263 1
+265 2
+266 1
+27 1
+272 2
+273 3
+274 1
+275 1
+277 4
+278 2
+28 1
+280 2
+281 2
+282 2
+283 1
+284 1
+285 1
+286 1
+287 1
+288 2
+289 1
+291 1
+292 1
+296 1
+298 3
+30 1
+302 1
+305 1
+306 1
+307 2
+308 1
+309 2
+310 1
+311 3
+315 1
+316 3
+317 2
+318 3
+321 2
+322 2
+323 1
+325 2
+327 3
+33 1
+331 2
+332 1
+333 2
+335 1
+336 1
+338 1
+339 1
+34 1
+341 1
+342 2
+344 2
+345 1
+348 5
+35 3
+351 1
+353 2
+356 1
+360 1
+362 1
+364 1
+365 1
+366 1
+367 2
+368 1
+369 3
+37 2
+373 1
+374 1
+375 1
+377 1
+378 1
+379 1
+382 2
+384 3
+386 1
+389 1
+392 1
+393 1
+394 1
+395 2
+396 3
+397 2
+399 2
+4 1
+400 1
+401 5
+402 1
+403 3
+404 2
+406 4
+407 1
+409 3
+41 1
+411 1
+413 2
+414 2
+417 3
+418 1
+419 1
+42 2
+421 1
+424 2
+427 1
+429 2
+43 1
+430 3
+431 3
+432 1
+435 1
+436 1
+437 1
+438 3
+439 2
+44 1
+443 1
+444 1
+446 1
+448 1
+449 1
+452 1
+453 1
+454 3
+455 1
+457 1
+458 2
+459 2
+460 1
+462 2
+463 2
+466 3
+467 1
+468 4
+469 5
+47 1
+470 1
+472 1
+475 1
+477 1
+478 2
+479 1
+480 3
+481 1
+482 1
+483 1
+484 1
+485 1
+487 1
+489 4
+490 1
+491 1
+492 2
+493 1
+494 1
+495 1
+496 1
+497 1
+498 3
+5 3
+51 2
+53 1
+54 1
+57 1
+58 2
+64 1
+65 1
+66 1
+67 2
+69 1
+70 3
+72 2
+74 1
+76 2
+77 1
+78 1
+8 1
+80 1
+82 1
+83 2
+84 2
+85 1
+86 1
+87 1
+9 1
+90 3
+92 1
+95 2
+96 1
+97 2
+98 2
+PREHOOK: query: -- join query with multiple reduce stages;
+EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- join query with multiple reduce stages;
+EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string), _col1 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string), KEY._col1 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: complete
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: bigint), _col0 (type: string)
+ sort order: ++
+ Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0 1
+10 1
+100 1
+103 1
+104 1
+105 1
+11 1
+111 1
+113 1
+114 1
+116 1
+118 1
+119 1
+12 1
+120 1
+125 1
+126 1
+128 1
+129 1
+131 1
+133 1
+134 1
+136 1
+137 1
+138 1
+143 1
+145 1
+146 1
+149 1
+15 1
+150 1
+152 1
+153 1
+155 1
+156 1
+157 1
+158 1
+160 1
+162 1
+163 1
+164 1
+165 1
+166 1
+167 1
+168 1
+169 1
+17 1
+170 1
+172 1
+174 1
+175 1
+176 1
+177 1
+178 1
+179 1
+18 1
+180 1
+181 1
+183 1
+186 1
+187 1
+189 1
+19 1
+190 1
+191 1
+192 1
+193 1
+194 1
+195 1
+196 1
+197 1
+199 1
+2 1
+20 1
+200 1
+201 1
+202 1
+203 1
+205 1
+207 1
+208 1
+209 1
+213 1
+214 1
+216 1
+217 1
+218 1
+219 1
+221 1
+222 1
+223 1
+224 1
+226 1
+228 1
+229 1
+230 1
+233 1
+235 1
+237 1
+238 1
+239 1
+24 1
+241 1
+242 1
+244 1
+247 1
+248 1
+249 1
+252 1
+255 1
+256 1
+257 1
+258 1
+26 1
+260 1
+262 1
+263 1
+265 1
+266 1
+27 1
+272 1
+273 1
+274 1
+275 1
+277 1
+278 1
+28 1
+280 1
+281 1
+282 1
+283 1
+284 1
+285 1
+286 1
+287 1
+288 1
+289 1
+291 1
+292 1
+296 1
+298 1
+30 1
+302 1
+305 1
+306 1
+307 1
+308 1
+309 1
+310 1
+311 1
+315 1
+316 1
+317 1
+318 1
+321 1
+322 1
+323 1
+325 1
+327 1
+33 1
+331 1
+332 1
+333 1
+335 1
+336 1
+338 1
+339 1
+34 1
+341 1
+342 1
+344 1
+345 1
+348 1
+35 1
+351 1
+353 1
+356 1
+360 1
+362 1
+364 1
+365 1
+366 1
+367 1
+368 1
+369 1
+37 1
+373 1
+374 1
+375 1
+377 1
+378 1
+379 1
+382 1
+384 1
+386 1
+389 1
+392 1
+393 1
+394 1
+395 1
+396 1
+397 1
+399 1
+4 1
+400 1
+401 1
+402 1
+403 1
+404 1
+406 1
+407 1
+409 1
+41 1
+411 1
+413 1
+414 1
+417 1
+418 1
+419 1
+42 1
+421 1
+424 1
+427 1
+429 1
+43 1
+430 1
+431 1
+432 1
+435 1
+436 1
+437 1
+438 1
+439 1
+44 1
+443 1
+444 1
+446 1
+448 1
+449 1
+452 1
+453 1
+454 1
+455 1
+457 1
+458 1
+459 1
+460 1
+462 1
+463 1
+466 1
+467 1
+468 1
+469 1
+47 1
+470 1
+472 1
+475 1
+477 1
+478 1
+479 1
+480 1
+481 1
+482 1
+483 1
+484 1
+485 1
+487 1
+489 1
+490 1
+491 1
+492 1
+493 1
+494 1
+495 1
+496 1
+497 1
+498 1
+5 1
+51 1
+53 1
+54 1
+57 1
+58 1
+64 1
+65 1
+66 1
+67 1
+69 1
+70 1
+72 1
+74 1
+76 1
+77 1
+78 1
+8 1
+80 1
+82 1
+83 1
+84 1
+85 1
+86 1
+87 1
+9 1
+90 1
+92 1
+95 1
+96 1
+97 1
+98 1
+PREHOOK: query: -- same query with broadcast join
+EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- same query with broadcast join
+EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 4 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ keys: _col0 (type: string), _col1 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string), KEY._col1 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: complete
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: bigint), _col0 (type: string)
+ sort order: ++
+ Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0 1
+10 1
+100 1
+103 1
+104 1
+105 1
+11 1
+111 1
+113 1
+114 1
+116 1
+118 1
+119 1
+12 1
+120 1
+125 1
+126 1
+128 1
+129 1
+131 1
+133 1
+134 1
+136 1
+137 1
+138 1
+143 1
+145 1
+146 1
+149 1
+15 1
+150 1
+152 1
+153 1
+155 1
+156 1
+157 1
+158 1
+160 1
+162 1
+163 1
+164 1
+165 1
+166 1
+167 1
+168 1
+169 1
+17 1
+170 1
+172 1
+174 1
+175 1
+176 1
+177 1
+178 1
+179 1
+18 1
+180 1
+181 1
+183 1
+186 1
+187 1
+189 1
+19 1
+190 1
+191 1
+192 1
+193 1
+194 1
+195 1
+196 1
+197 1
+199 1
+2 1
+20 1
+200 1
+201 1
+202 1
+203 1
+205 1
+207 1
+208 1
+209 1
+213 1
+214 1
+216 1
+217 1
+218 1
+219 1
+221 1
+222 1
+223 1
+224 1
+226 1
+228 1
+229 1
+230 1
+233 1
+235 1
+237 1
+238 1
+239 1
+24 1
+241 1
+242 1
+244 1
+247 1
+248 1
+249 1
+252 1
+255 1
+256 1
+257 1
+258 1
+26 1
+260 1
+262 1
+263 1
+265 1
+266 1
+27 1
+272 1
+273 1
+274 1
+275 1
+277 1
+278 1
+28 1
+280 1
+281 1
+282 1
+283 1
+284 1
+285 1
+286 1
+287 1
+288 1
+289 1
+291 1
+292 1
+296 1
+298 1
+30 1
+302 1
+305 1
+306 1
+307 1
+308 1
+309 1
+310 1
+311 1
+315 1
+316 1
+317 1
+318 1
+321 1
+322 1
+323 1
+325 1
+327 1
+33 1
+331 1
+332 1
+333 1
+335 1
+336 1
+338 1
+339 1
+34 1
+341 1
+342 1
+344 1
+345 1
+348 1
+35 1
+351 1
+353 1
+356 1
+360 1
+362 1
+364 1
+365 1
+366 1
+367 1
+368 1
+369 1
+37 1
+373 1
+374 1
+375 1
+377 1
+378 1
+379 1
+382 1
+384 1
+386 1
+389 1
+392 1
+393 1
+394 1
+395 1
+396 1
+397 1
+399 1
+4 1
+400 1
+401 1
+402 1
+403 1
+404 1
+406 1
+407 1
+409 1
+41 1
+411 1
+413 1
+414 1
+417 1
+418 1
+419 1
+42 1
+421 1
+424 1
+427 1
+429 1
+43 1
+430 1
+431 1
+432 1
+435 1
+436 1
+437 1
+438 1
+439 1
+44 1
+443 1
+444 1
+446 1
+448 1
+449 1
+452 1
+453 1
+454 1
+455 1
+457 1
+458 1
+459 1
+460 1
+462 1
+463 1
+466 1
+467 1
+468 1
+469 1
+47 1
+470 1
+472 1
+475 1
+477 1
+478 1
+479 1
+480 1
+481 1
+482 1
+483 1
+484 1
+485 1
+487 1
+489 1
+490 1
+491 1
+492 1
+493 1
+494 1
+495 1
+496 1
+497 1
+498 1
+5 1
+51 1
+53 1
+54 1
+57 1
+58 1
+64 1
+65 1
+66 1
+67 1
+69 1
+70 1
+72 1
+74 1
+76 1
+77 1
+78 1
+8 1
+80 1
+82 1
+83 1
+84 1
+85 1
+86 1
+87 1
+9 1
+90 1
+92 1
+95 1
+96 1
+97 1
+98 1
+PREHOOK: query: -- query with multiple branches in the task dag
+EXPLAIN
+SELECT *
+FROM
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s1
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s2
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s3
+ ON (s1.key = s2.key and s1.key = s3.key)
+WHERE
+ s1.cnt > 1
+ORDER BY s1.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- query with multiple branches in the task dag
+EXPLAIN
+SELECT *
+FROM
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s1
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s2
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s3
+ ON (s1.key = s2.key and s1.key = s3.key)
+WHERE
+ s1.cnt > 1
+ORDER BY s1.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 10 <- Map 9 (SIMPLE_EDGE)
+ Reducer 11 <- Reducer 10 (SIMPLE_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 11 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+ Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
+ Reducer 7 <- Map 6 (SIMPLE_EDGE)
+ Reducer 8 <- Reducer 7 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 9
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Reducer 10
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: bigint)
+ sort order: +
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Reducer 11
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: bigint)
+ sort order: +
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 4
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ 2 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col3, _col4, _col5
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col3 (type: bigint), _col1 (type: bigint), _col4 (type: string), _col5 (type: bigint)
+ outputColumnNames: _col0, _col1, _col3, _col4, _col5
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint)
+ Reducer 5
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), KEY.reducesinkkey0 (type: string), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 7
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (_col1 > 1) (type: boolean)
+ Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: bigint)
+ sort order: +
+ Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Reducer 8
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT *
+FROM
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s1
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s2
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s3
+ ON (s1.key = s2.key and s1.key = s3.key)
+WHERE
+ s1.cnt > 1
+ORDER BY s1.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT *
+FROM
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s1
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s2
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s3
+ ON (s1.key = s2.key and s1.key = s3.key)
+WHERE
+ s1.cnt > 1
+ORDER BY s1.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0 3 0 3 0 3
+100 2 100 2 100 2
+103 2 103 2 103 2
+104 2 104 2 104 2
+113 2 113 2 113 2
+118 2 118 2 118 2
+119 3 119 3 119 3
+12 2 12 2 12 2
+120 2 120 2 120 2
+125 2 125 2 125 2
+128 3 128 3 128 3
+129 2 129 2 129 2
+134 2 134 2 134 2
+137 2 137 2 137 2
+138 4 138 4 138 4
+146 2 146 2 146 2
+149 2 149 2 149 2
+15 2 15 2 15 2
+152 2 152 2 152 2
+164 2 164 2 164 2
+165 2 165 2 165 2
+167 3 167 3 167 3
+169 4 169 4 169 4
+172 2 172 2 172 2
+174 2 174 2 174 2
+175 2 175 2 175 2
+176 2 176 2 176 2
+179 2 179 2 179 2
+18 2 18 2 18 2
+187 3 187 3 187 3
+191 2 191 2 191 2
+193 3 193 3 193 3
+195 2 195 2 195 2
+197 2 197 2 197 2
+199 3 199 3 199 3
+200 2 200 2 200 2
+203 2 203 2 203 2
+205 2 205 2 205 2
+207 2 207 2 207 2
+208 3 208 3 208 3
+209 2 209 2 209 2
+213 2 213 2 213 2
+216 2 216 2 216 2
+217 2 217 2 217 2
+219 2 219 2 219 2
+221 2 221 2 221 2
+223 2 223 2 223 2
+224 2 224 2 224 2
+229 2 229 2 229 2
+230 5 230 5 230 5
+233 2 233 2 233 2
+237 2 237 2 237 2
+238 2 238 2 238 2
+239 2 239 2 239 2
+24 2 24 2 24 2
+242 2 242 2 242 2
+255 2 255 2 255 2
+256 2 256 2 256 2
+26 2 26 2 26 2
+265 2 265 2 265 2
+272 2 272 2 272 2
+273 3 273 3 273 3
+277 4 277 4 277 4
+278 2 278 2 278 2
+280 2 280 2 280 2
+281 2 281 2 281 2
+282 2 282 2 282 2
+288 2 288 2 288 2
+298 3 298 3 298 3
+307 2 307 2 307 2
+309 2 309 2 309 2
+311 3 311 3 311 3
+316 3 316 3 316 3
+317 2 317 2 317 2
+318 3 318 3 318 3
+321 2 321 2 321 2
+322 2 322 2 322 2
+325 2 325 2 325 2
+327 3 327 3 327 3
+331 2 331 2 331 2
+333 2 333 2 333 2
+342 2 342 2 342 2
+344 2 344 2 344 2
+348 5 348 5 348 5
+35 3 35 3 35 3
+353 2 353 2 353 2
+367 2 367 2 367 2
+369 3 369 3 369 3
+37 2 37 2 37 2
+382 2 382 2 382 2
+384 3 384 3 384 3
+395 2 395 2 395 2
+396 3 396 3 396 3
+397 2 397 2 397 2
+399 2 399 2 399 2
+401 5 401 5 401 5
+403 3 403 3 403 3
+404 2 404 2 404 2
+406 4 406 4 406 4
+409 3 409 3 409 3
+413 2 413 2 413 2
+414 2 414 2 414 2
+417 3 417 3 417 3
+42 2 42 2 42 2
+424 2 424 2 424 2
+429 2 429 2 429 2
+430 3 430 3 430 3
+431 3 431 3 431 3
+438 3 438 3 438 3
+439 2 439 2 439 2
+454 3 454 3 454 3
+458 2 458 2 458 2
+459 2 459 2 459 2
+462 2 462 2 462 2
+463 2 463 2 463 2
+466 3 466 3 466 3
+468 4 468 4 468 4
+469 5 469 5 469 5
+478 2 478 2 478 2
+480 3 480 3 480 3
+489 4 489 4 489 4
+492 2 492 2 492 2
+498 3 498 3 498 3
+5 3 5 3 5 3
+51 2 51 2 51 2
+58 2 58 2 58 2
+67 2 67 2 67 2
+70 3 70 3 70 3
+72 2 72 2 72 2
+76 2 76 2 76 2
+83 2 83 2 83 2
+84 2 84 2 84 2
+90 3 90 3 90 3
+95 2 95 2 95 2
+97 2 97 2 97 2
+98 2 98 2 98 2
+PREHOOK: query: -- query with broadcast join in the reduce stage
+EXPLAIN
+SELECT *
+FROM
+ (SELECT key, count(value) as cnt FROM src GROUP BY key) s1
+ JOIN src ON (s1.key = src.key) order by s1.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- query with broadcast join in the reduce stage
+EXPLAIN
+SELECT *
+FROM
+ (SELECT key, count(value) as cnt FROM src GROUP BY key) s1
+ JOIN src ON (s1.key = src.key) order by s1.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 3 <- Reducer 2 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 4 <- Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ input vertices:
+ 0 Reducer 2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: string), VALUE._col2 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT *
+FROM
+ (SELECT key, count(value) as cnt FROM src GROUP BY key) s1
+ JOIN src ON (s1.key = src.key) order by s1.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT *
+FROM
+ (SELECT key, count(value) as cnt FROM src GROUP BY key) s1
+ JOIN src ON (s1.key = src.key) order by s1.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0 3 0 val_0
+0 3 0 val_0
+0 3 0 val_0
+10 1 10 val_10
+100 2 100 val_100
+100 2 100 val_100
+103 2 103 val_103
+103 2 103 val_103
+104 2 104 val_104
+104 2 104 val_104
+105 1 105 val_105
+11 1 11 val_11
+111 1 111 val_111
+113 2 113 val_113
+113 2 113 val_113
+114 1 114 val_114
+116 1 116 val_116
+118 2 118 val_118
+118 2 118 val_118
+119 3 119 val_119
+119 3 119 val_119
+119 3 119 val_119
+12 2 12 val_12
+12 2 12 val_12
+120 2 120 val_120
+120 2 120 val_120
+125 2 125 val_125
+125 2 125 val_125
+126 1 126 val_126
+128 3 128 val_128
+128 3 128 val_128
+128 3 128 val_128
+129 2 129 val_129
+129 2 129 val_129
+131 1 131 val_131
+133 1 133 val_133
+134 2 134 val_134
+134 2 134 val_134
+136 1 136 val_136
+137 2 137 val_137
+137 2 137 val_137
+138 4 138 val_138
+138 4 138 val_138
+138 4 138 val_138
+138 4 138 val_138
+143 1 143 val_143
+145 1 145 val_145
+146 2 146 val_146
+146 2 146 val_146
+149 2 149 val_149
+149 2 149 val_149
+15 2 15 val_15
+15 2 15 val_15
+150 1 150 val_150
+152 2 152 val_152
+152 2 152 val_152
+153 1 153 val_153
+155 1 155 val_155
+156 1 156 val_156
+157 1 157 val_157
+158 1 158 val_158
+160 1 160 val_160
+162 1 162 val_162
+163 1 163 val_163
+164 2 164 val_164
+164 2 164 val_164
+165 2 165 val_165
+165 2 165 val_165
+166 1 166 val_166
+167 3 167 val_167
+167 3 167 val_167
+167 3 167 val_167
+168 1 168 val_168
+169 4 169 val_169
+169 4 169 val_169
+169 4 169 val_169
+169 4 169 val_169
+17 1 17 val_17
+170 1 170 val_170
+172 2 172 val_172
+172 2 172 val_172
+174 2 174 val_174
+174 2 174 val_174
+175 2 175 val_175
+175 2 175 val_175
+176 2 176 val_176
+176 2 176 val_176
+177 1 177 val_177
+178 1 178 val_178
+179 2 179 val_179
+179 2 179 val_179
+18 2 18 val_18
+18 2 18 val_18
+180 1 180 val_180
+181 1 181 val_181
+183 1 183 val_183
+186 1 186 val_186
+187 3 187 val_187
+187 3 187 val_187
+187 3 187 val_187
+189 1 189 val_189
+19 1 19 val_19
+190 1 190 val_190
+191 2 191 val_191
+191 2 191 val_191
+192 1 192 val_192
+193 3 193 val_193
+193 3 193 val_193
+193 3 193 val_193
+194 1 194 val_194
+195 2 195 val_195
+195 2 195 val_195
+196 1 196 val_196
+197 2 197 val_197
+197 2 197 val_197
+199 3 199 val_199
+199 3 199 val_199
+199 3 199 val_199
+2 1 2 val_2
+20 1 20 val_20
+200 2 200 val_200
+200 2 200 val_200
+201 1 201 val_201
+202 1 202 val_202
+203 2 203 val_203
+203 2 203 val_203
+205 2 205 val_205
+205 2 205 val_205
+207 2 207 val_207
+207 2 207 val_207
+208 3 208 val_208
+208 3 208 val_208
+208 3 208 val_208
+209 2 209 val_209
+209 2 209 val_209
+213 2 213 val_213
+213 2 213 val_213
+214 1 214 val_214
+216 2 216 val_216
+216 2 216 val_216
+217 2 217 val_217
+217 2 217 val_217
+218 1 218 val_218
+219 2 219 val_219
+219 2 219 val_219
+221 2 221 val_221
+221 2 221 val_221
+222 1 222 val_222
+223 2 223 val_223
+223 2 223 val_223
+224 2 224 val_224
+224 2 224 val_224
+226 1 226 val_226
+228 1 228 val_228
+229 2 229 val_229
+229 2 229 val_229
+230 5 230 val_230
+230 5 230 val_230
+230 5 230 val_230
+230 5 230 val_230
+230 5 230 val_230
+233 2 233 val_233
+233 2 233 val_233
+235 1 235 val_235
+237 2 237 val_237
+237 2 237 val_237
+238 2 238 val_238
+238 2 238 val_238
+239 2 239 val_239
+239 2 239 val_239
+24 2 24 val_24
+24 2 24 val_24
+241 1 241 val_241
+242 2 242 val_242
+242 2 242 val_242
+244 1 244 val_244
+247 1 247 val_247
+248 1 248 val_248
+249 1 249 val_249
+252 1 252 val_252
+255 2 255 val_255
+255 2 255 val_255
+256 2 256 val_256
+256 2 256 val_256
+257 1 257 val_257
+258 1 258 val_258
+26 2 26 val_26
+26 2 26 val_26
+260 1 260 val_260
+262 1 262 val_262
+263 1 263 val_263
+265 2 265 val_265
+265 2 265 val_265
+266 1 266 val_266
+27 1 27 val_27
+272 2 272 val_272
+272 2 272 val_272
+273 3 273 val_273
+273 3 273 val_273
+273 3 273 val_273
+274 1 274 val_274
+275 1 275 val_275
+277 4 277 val_277
+277 4 277 val_277
+277 4 277 val_277
+277 4 277 val_277
+278 2 278 val_278
+278 2 278 val_278
+28 1 28 val_28
+280 2 280 val_280
+280 2 280 val_280
+281 2 281 val_281
+281 2 281 val_281
+282 2 282 val_282
+282 2 282 val_282
+283 1 283 val_283
+284 1 284 val_284
+285 1 285 val_285
+286 1 286 val_286
+287 1 287 val_287
+288 2 288 val_288
+288 2 288 val_288
+289 1 289 val_289
+291 1 291 val_291
+292 1 292 val_292
+296 1 296 val_296
+298 3 298 val_298
+298 3 298 val_298
+298 3 298 val_298
+30 1 30 val_30
+302 1 302 val_302
+305 1 305 val_305
+306 1 306 val_306
+307 2 307 val_307
+307 2 307 val_307
+308 1 308 val_308
+309 2 309 val_309
+309 2 309 val_309
+310 1 310 val_310
+311 3 311 val_311
+311 3 311 val_311
+311 3 311 val_311
+315 1 315 val_315
+316 3 316 val_316
+316 3 316 val_316
+316 3 316 val_316
+317 2 317 val_317
+317 2 317 val_317
+318 3 318 val_318
+318 3 318 val_318
+318 3 318 val_318
+321 2 321 val_321
+321 2 321 val_321
+322 2 322 val_322
+322 2 322 val_322
+323 1 323 val_323
+325 2 325 val_325
+325 2 325 val_325
+327 3 327 val_327
+327 3 327 val_327
+327 3 327 val_327
+33 1 33 val_33
+331 2 331 val_331
+331 2 331 val_331
+332 1 332 val_332
+333 2 333 val_333
+333 2 333 val_333
+335 1 335 val_335
+336 1 336 val_336
+338 1 338 val_338
+339 1 339 val_339
+34 1 34 val_34
+341 1 341 val_341
+342 2 342 val_342
+342 2 342 val_342
+344 2 344 val_344
+344 2 344 val_344
+345 1 345 val_345
+348 5 348 val_348
+348 5 348 val_348
+348 5 348 val_348
+348 5 348 val_348
+348 5 348 val_348
+35 3 35 val_35
+35 3 35 val_35
+35 3 35 val_35
+351 1 351 val_351
+353 2 353 val_353
+353 2 353 val_353
+356 1 356 val_356
+360 1 360 val_360
+362 1 362 val_362
+364 1 364 val_364
+365 1 365 val_365
+366 1 366 val_366
+367 2 367 val_367
+367 2 367 val_367
+368 1 368 val_368
+369 3 369 val_369
+369 3 369 val_369
+369 3 369 val_369
+37 2 37 val_37
+37 2 37 val_37
+373 1 373 val_373
+374 1 374 val_374
+375 1 375 val_375
+377 1 377 val_377
+378 1 378 val_378
+379 1 379 val_379
+382 2 382 val_382
+382 2 382 val_382
+384 3 384 val_384
+384 3 384 val_384
+384 3 384 val_384
+386 1 386 val_386
+389 1 389 val_389
+392 1 392 val_392
+393 1 393 val_393
+394 1 394 val_394
+395 2 395 val_395
+395 2 395 val_395
+396 3 396 val_396
+396 3 396 val_396
+396 3 396 val_396
+397 2 397 val_397
+397 2 397 val_397
+399 2 399 val_399
+399 2 399 val_399
+4 1 4 val_4
+400 1 400 val_400
+401 5 401 val_401
+401 5 401 val_401
+401 5 401 val_401
+401 5 401 val_401
+401 5 401 val_401
+402 1 402 val_402
+403 3 403 val_403
+403 3 403 val_403
+403 3 403 val_403
+404 2 404 val_404
+404 2 404 val_404
+406 4 406 val_406
+406 4 406 val_406
+406 4 406 val_406
+406 4 406 val_406
+407 1 407 val_407
+409 3 409 val_409
+409 3 409 val_409
+409 3 409 val_409
+41 1 41 val_41
+411 1 411 val_411
+413 2 413 val_413
+413 2 413 val_413
+414 2 414 val_414
+414 2 414 val_414
+417 3 417 val_417
+417 3 417 val_417
+417 3 417 val_417
+418 1 418 val_418
+419 1 419 val_419
+42 2 42 val_42
+42 2 42 val_42
+421 1 421 val_421
+424 2 424 val_424
+424 2 424 val_424
+427 1 427 val_427
+429 2 429 val_429
+429 2 429 val_429
+43 1 43 val_43
+430 3 430 val_430
+430 3 430 val_430
+430 3 430 val_430
+431 3 431 val_431
+431 3 431 val_431
+431 3 431 val_431
+432 1 432 val_432
+435 1 435 val_435
+436 1 436 val_436
+437 1 437 val_437
+438 3 438 val_438
+438 3 438 val_438
+438 3 438 val_438
+439 2 439 val_439
+439 2 439 val_439
+44 1 44 val_44
+443 1 443 val_443
+444 1 444 val_444
+446 1 446 val_446
+448 1 448 val_448
+449 1 449 val_449
+452 1 452 val_452
+453 1 453 val_453
+454 3 454 val_454
+454 3 454 val_454
+454 3 454 val_454
+455 1 455 val_455
+457 1 457 val_457
+458 2 458 val_458
+458 2 458 val_458
+459 2 459 val_459
+459 2 459 val_459
+460 1 460 val_460
+462 2 462 val_462
+462 2 462 val_462
+463 2 463 val_463
+463 2 463 val_463
+466 3 466 val_466
+466 3 466 val_466
+466 3 466 val_466
+467 1 467 val_467
+468 4 468 val_468
+468 4 468 val_468
+468 4 468 val_468
+468 4 468 val_468
+469 5 469 val_469
+469 5 469 val_469
+469 5 469 val_469
+469 5 469 val_469
+469 5 469 val_469
+47 1 47 val_47
+470 1 470 val_470
+472 1 472 val_472
+475 1 475 val_475
+477 1 477 val_477
+478 2 478 val_478
+478 2 478 val_478
+479 1 479 val_479
+480 3 480 val_480
+480 3 480 val_480
+480 3 480 val_480
+481 1 481 val_481
+482 1 482 val_482
+483 1 483 val_483
+484 1 484 val_484
+485 1 485 val_485
+487 1 487 val_487
+489 4 489 val_489
+489 4 489 val_489
+489 4 489 val_489
+489 4 489 val_489
+490 1 490 val_490
+491 1 491 val_491
+492 2 492 val_492
+492 2 492 val_492
+493 1 493 val_493
+494 1 494 val_494
+495 1 495 val_495
+496 1 496 val_496
+497 1 497 val_497
+498 3 498 val_498
+498 3 498 val_498
+498 3 498 val_498
+5 3 5 val_5
+5 3 5 val_5
+5 3 5 val_5
+51 2 51 val_51
+51 2 51 val_51
+53 1 53 val_53
+54 1 54 val_54
+57 1 57 val_57
+58 2 58 val_58
+58 2 58 val_58
+64 1 64 val_64
+65 1 65 val_65
+66 1 66 val_66
+67 2 67 val_67
+67 2 67 val_67
+69 1 69 val_69
+70 3 70 val_70
+70 3 70 val_70
+70 3 70 val_70
+72 2 72 val_72
+72 2 72 val_72
+74 1 74 val_74
+76 2 76 val_76
+76 2 76 val_76
+77 1 77 val_77
+78 1 78 val_78
+8 1 8 val_8
+80 1 80 val_80
+82 1 82 val_82
+83 2 83 val_83
+83 2 83 val_83
+84 2 84 val_84
+84 2 84 val_84
+85 1 85 val_85
+86 1 86 val_86
+87 1 87 val_87
+9 1 9 val_9
+90 3 90 val_90
+90 3 90 val_90
+90 3 90 val_90
+92 1 92 val_92
+95 2 95 val_95
+95 2 95 val_95
+96 1 96 val_96
+97 2 97 val_97
+97 2 97 val_97
+98 2 98 val_98
+98 2 98 val_98
[51/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
HIVE-11776: LLAP: Generate golden files for all MiniLlapCluster tests (Prasanth Jayachandran)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ace87818
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ace87818
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ace87818
Branch: refs/heads/llap
Commit: ace87818b3808a88cc23cfccd96858250a693da4
Parents: 75b1e6d
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Tue Sep 15 15:41:31 2015 -0500
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Tue Sep 15 15:41:31 2015 -0500
----------------------------------------------------------------------
.../llap/acid_vectorization.q.out | 62 +
.../llap/acid_vectorization_project.q.out | 73 +
.../clientpositive/llap/alter_merge_2_orc.q.out | 123 +
.../clientpositive/llap/alter_merge_orc.q.out | 263 +
.../llap/alter_merge_stats_orc.q.out | 382 +
.../clientpositive/llap/auto_join0.q.out | 172 +
.../clientpositive/llap/auto_join1.q.out | 125 +
.../clientpositive/llap/auto_join21.q.out | 615 +
.../clientpositive/llap/auto_join29.q.out | 3556 ++++
.../clientpositive/llap/auto_join30.q.out | 1361 ++
.../clientpositive/llap/auto_join_filters.q.out | 540 +
.../llap/auto_sortmerge_join_1.q.out | 1034 ++
.../llap/auto_sortmerge_join_10.q.out | 369 +
.../llap/auto_sortmerge_join_11.q.out | 1485 ++
.../llap/auto_sortmerge_join_12.q.out | 645 +
.../llap/auto_sortmerge_join_13.q.out | 692 +
.../llap/auto_sortmerge_join_14.q.out | 224 +
.../llap/auto_sortmerge_join_15.q.out | 188 +
.../llap/auto_sortmerge_join_16.q.out | 256 +
.../llap/auto_sortmerge_join_2.q.out | 707 +
.../llap/auto_sortmerge_join_3.q.out | 1014 ++
.../llap/auto_sortmerge_join_4.q.out | 1030 ++
.../llap/auto_sortmerge_join_5.q.out | 780 +
.../llap/auto_sortmerge_join_7.q.out | 1200 ++
.../llap/auto_sortmerge_join_8.q.out | 1202 ++
.../llap/auto_sortmerge_join_9.q.out | 3521 ++++
.../results/clientpositive/llap/bucket2.q.out | 473 +
.../results/clientpositive/llap/bucket3.q.out | 498 +
.../results/clientpositive/llap/bucket4.q.out | 472 +
.../llap/bucket_map_join_tez1.q.out | 1606 ++
.../llap/bucket_map_join_tez2.q.out | 686 +
.../results/clientpositive/llap/cbo_gby.q.out | 124 +
.../clientpositive/llap/cbo_gby_empty.q.out | 77 +
.../results/clientpositive/llap/cbo_join.q.out | 15028 +++++++++++++++++
.../results/clientpositive/llap/cbo_limit.q.out | 90 +
.../clientpositive/llap/cbo_semijoin.q.out | 440 +
.../clientpositive/llap/cbo_simple_select.q.out | 755 +
.../results/clientpositive/llap/cbo_stats.q.out | 14 +
.../clientpositive/llap/cbo_subq_exists.q.out | 297 +
.../clientpositive/llap/cbo_subq_in.q.out | 151 +
.../clientpositive/llap/cbo_subq_not_in.q.out | 365 +
.../clientpositive/llap/cbo_udf_udaf.q.out | 125 +
.../results/clientpositive/llap/cbo_union.q.out | 920 +
.../results/clientpositive/llap/cbo_views.q.out | 237 +
.../clientpositive/llap/cbo_windowing.q.out | 293 +
.../llap/correlationoptimizer1.q.out | 3084 ++++
.../results/clientpositive/llap/count.q.out | 298 +
.../llap/create_merge_compressed.q.out | 138 +
.../clientpositive/llap/cross_join.q.out | 214 +
.../llap/cross_product_check_1.q.out | 575 +
.../llap/cross_product_check_2.q.out | 534 +
.../test/results/clientpositive/llap/ctas.q.out | 930 +
.../llap/custom_input_output_format.q.out | 102 +
.../llap/delete_all_non_partitioned.q.out | 52 +
.../llap/delete_all_partitioned.q.out | 86 +
.../clientpositive/llap/delete_orig_table.q.out | 61 +
.../clientpositive/llap/delete_tmp_table.q.out | 60 +
.../llap/delete_where_no_match.q.out | 62 +
.../llap/delete_where_non_partitioned.q.out | 61 +
.../llap/delete_where_partitioned.q.out | 105 +
.../llap/delete_whole_partition.q.out | 92 +
.../llap/disable_merge_for_bucketing.q.out | 502 +
.../llap/dynamic_partition_pruning.q.out | 5352 ++++++
.../llap/dynamic_partition_pruning_2.q.out | 1114 ++
.../llap/dynpart_sort_opt_vectorization.q.out | 2626 +++
.../llap/dynpart_sort_optimization.q.out | 2411 +++
.../llap/dynpart_sort_optimization2.q.out | 1844 ++
.../clientpositive/llap/enforce_order.q.out | 80 +
.../clientpositive/llap/explainuser_2.q.out | 5519 ++++++
.../clientpositive/llap/explainuser_3.q.out | 522 +
.../llap/filter_join_breaktask.q.out | 445 +
.../llap/filter_join_breaktask2.q.out | 272 +
.../results/clientpositive/llap/groupby1.q.out | 428 +
.../results/clientpositive/llap/groupby2.q.out | 133 +
.../results/clientpositive/llap/groupby3.q.out | 158 +
.../results/clientpositive/llap/having.q.out | 1298 ++
.../llap/hybridgrace_hashjoin_1.q.out | 1617 ++
.../llap/hybridgrace_hashjoin_2.q.out | 1477 ++
.../llap/insert_acid_dynamic_partition.q.out | 48 +
.../llap/insert_acid_not_bucketed.q.out | 36 +
.../clientpositive/llap/insert_into1.q.out | 381 +
.../clientpositive/llap/insert_into2.q.out | 440 +
.../clientpositive/llap/insert_orig_table.q.out | 80 +
.../llap/insert_update_delete.q.out | 78 +
.../llap/insert_values_acid_not_bucketed.q.out | 28 +
.../insert_values_dynamic_partitioned.q.out | 45 +
.../llap/insert_values_non_partitioned.q.out | 70 +
.../llap/insert_values_orig_table.q.out | 82 +
.../llap/insert_values_partitioned.q.out | 66 +
.../llap/insert_values_tmp_table.q.out | 36 +
.../clientpositive/llap/join0.q.java1.7.out | 242 +
.../results/clientpositive/llap/join1.q.out | 1158 ++
.../clientpositive/llap/join_nullsafe.q.out | 1667 ++
.../clientpositive/llap/leftsemijoin.q.out | 114 +
.../clientpositive/llap/limit_pushdown.q.out | 1487 ++
.../clientpositive/llap/llapdecider.q.out | 1195 ++
.../clientpositive/llap/load_dyn_part1.q.out | 2215 +++
.../clientpositive/llap/load_dyn_part2.q.out | 2152 +++
.../clientpositive/llap/load_dyn_part3.q.out | 2138 +++
.../clientpositive/llap/lvj_mapjoin.q.out | 296 +
.../clientpositive/llap/mapjoin_decimal.q.out | 393 +
.../clientpositive/llap/mapjoin_mapjoin.q.out | 825 +
.../clientpositive/llap/mapreduce1.q.out | 621 +
.../clientpositive/llap/mapreduce2.q.out | 616 +
.../results/clientpositive/llap/merge1.q.out | 596 +
.../results/clientpositive/llap/merge2.q.out | 596 +
.../results/clientpositive/llap/mergejoin.q.out | 3150 ++++
.../llap/metadata_only_queries.q.out | 504 +
.../test/results/clientpositive/llap/mrr.q.out | 2311 +++
.../clientpositive/llap/orc_analyze.q.out | 1726 ++
.../clientpositive/llap/orc_merge1.q.out | 500 +
.../clientpositive/llap/orc_merge2.q.out | 231 +
.../clientpositive/llap/orc_merge3.q.out | 170 +
.../clientpositive/llap/orc_merge4.q.out | 186 +
.../clientpositive/llap/orc_merge5.q.out | 344 +
.../clientpositive/llap/orc_merge6.q.out | 518 +
.../clientpositive/llap/orc_merge7.q.out | 629 +
.../clientpositive/llap/orc_merge8.q.out | 130 +
.../clientpositive/llap/orc_merge9.q.out | 186 +
.../llap/orc_merge_incompat1.q.out | 245 +
.../llap/orc_merge_incompat2.q.out | 375 +
.../clientpositive/llap/orc_ppd_basic.q.out | 701 +
.../llap/orc_vectorization_ppd.q.out | 288 +
.../results/clientpositive/llap/parallel.q.out | 1444 ++
.../test/results/clientpositive/llap/ptf.q.out | 4895 ++++++
.../clientpositive/llap/ptf_matchpath.q.out | 403 +
.../clientpositive/llap/ptf_streaming.q.out | 2640 +++
.../results/clientpositive/llap/sample1.q.out | 727 +
.../clientpositive/llap/script_env_var1.q.out | 18 +
.../clientpositive/llap/script_env_var2.q.out | 16 +
.../clientpositive/llap/script_pipe.q.out | 126 +
.../clientpositive/llap/scriptfile1.q.out | 53 +
.../llap/selectDistinctStar.q.out | 4918 ++++++
.../llap/select_dummy_source.q.out | 229 +
.../results/clientpositive/llap/skewjoin.q.out | 1195 ++
.../clientpositive/llap/stats_counter.q.out | 102 +
.../llap/stats_counter_partitioned.q.out | 465 +
.../clientpositive/llap/stats_noscan_1.q.out | 520 +
.../clientpositive/llap/stats_only_null.q.out | 422 +
.../clientpositive/llap/subquery_exists.q.out | 214 +
.../clientpositive/llap/subquery_in.q.out | 969 ++
.../clientpositive/llap/temp_table.q.out | 469 +
.../llap/tez_bmj_schema_evolution.q.out | 2214 +++
.../results/clientpositive/llap/tez_dml.q.out | 1526 ++
.../llap/tez_dynpart_hashjoin_1.q.out | 817 +
.../llap/tez_dynpart_hashjoin_2.q.out | 579 +
.../clientpositive/llap/tez_fsstat.q.out | 102 +
...tez_insert_overwrite_local_directory_1.q.out | 20 +
.../results/clientpositive/llap/tez_join.q.out | 150 +
.../clientpositive/llap/tez_join_hash.q.out | 980 ++
.../llap/tez_join_result_complex.q.out | 2163 +++
.../clientpositive/llap/tez_join_tests.q.out | 2227 +++
.../clientpositive/llap/tez_joins_explain.q.out | 715 +
.../clientpositive/llap/tez_multi_union.q.out | 833 +
.../llap/tez_schema_evolution.q.out | 114 +
.../clientpositive/llap/tez_self_join.q.out | 210 +
.../results/clientpositive/llap/tez_smb_1.q.out | 616 +
.../clientpositive/llap/tez_smb_main.q.out | 1422 ++
.../results/clientpositive/llap/tez_union.q.out | 1438 ++
.../clientpositive/llap/tez_union2.q.out | 820 +
.../clientpositive/llap/tez_union_decimal.q.out | 101 +
.../llap/tez_union_dynamic_partition.q.out | 158 +
.../llap/tez_union_group_by.q.out | 410 +
.../llap/tez_union_multiinsert.q.out | 4399 +++++
.../llap/tez_vector_dynpart_hashjoin_1.q.out | 817 +
.../llap/tez_vector_dynpart_hashjoin_2.q.out | 579 +
.../clientpositive/llap/transform1.q.out | 138 +
.../clientpositive/llap/transform2.q.out | 11 +
.../clientpositive/llap/transform_ppr1.q.out | 569 +
.../clientpositive/llap/transform_ppr2.q.out | 475 +
.../results/clientpositive/llap/union2.q.out | 104 +
.../results/clientpositive/llap/union3.q.out | 251 +
.../results/clientpositive/llap/union4.q.out | 175 +
.../results/clientpositive/llap/union5.q.out | 150 +
.../results/clientpositive/llap/union6.q.out | 172 +
.../results/clientpositive/llap/union7.q.out | 148 +
.../results/clientpositive/llap/union8.q.out | 1601 ++
.../results/clientpositive/llap/union9.q.out | 130 +
.../clientpositive/llap/unionDistinct_2.q.out | 545 +
.../llap/update_after_multiple_inserts.q.out | 78 +
.../llap/update_all_non_partitioned.q.out | 62 +
.../llap/update_all_partitioned.q.out | 106 +
.../clientpositive/llap/update_all_types.q.out | 196 +
.../clientpositive/llap/update_orig_table.q.out | 62 +
.../clientpositive/llap/update_tmp_table.q.out | 62 +
.../clientpositive/llap/update_two_cols.q.out | 63 +
.../llap/update_where_no_match.q.out | 62 +
.../llap/update_where_non_partitioned.q.out | 62 +
.../llap/update_where_partitioned.q.out | 106 +
.../clientpositive/llap/vector_acid3.q.out | 31 +
.../llap/vector_aggregate_9.q.out | 174 +
.../clientpositive/llap/vector_between_in.q.out | 691 +
.../llap/vector_binary_join_groupby.q.out | 305 +
.../clientpositive/llap/vector_bucket.q.out | 104 +
.../llap/vector_cast_constant.q.java1.7.out | 217 +
.../clientpositive/llap/vector_char_2.q.out | 292 +
.../clientpositive/llap/vector_char_4.q.out | 175 +
.../llap/vector_char_mapjoin1.q.out | 470 +
.../llap/vector_char_simple.q.out | 342 +
.../clientpositive/llap/vector_coalesce.q.out | 362 +
.../clientpositive/llap/vector_coalesce_2.q.out | 304 +
.../llap/vector_count_distinct.q.out | 1381 ++
.../clientpositive/llap/vector_data_types.q.out | 285 +
.../clientpositive/llap/vector_date_1.q.out | 719 +
.../clientpositive/llap/vector_decimal_1.q.out | 591 +
.../llap/vector_decimal_10_0.q.out | 112 +
.../clientpositive/llap/vector_decimal_2.q.out | 1658 ++
.../clientpositive/llap/vector_decimal_3.q.out | 390 +
.../clientpositive/llap/vector_decimal_4.q.out | 250 +
.../clientpositive/llap/vector_decimal_5.q.out | 239 +
.../clientpositive/llap/vector_decimal_6.q.out | 303 +
.../llap/vector_decimal_aggregate.q.out | 232 +
.../llap/vector_decimal_cast.q.out | 41 +
.../llap/vector_decimal_expressions.q.out | 96 +
.../llap/vector_decimal_mapjoin.q.out | 264 +
.../llap/vector_decimal_math_funcs.q.out | 192 +
.../llap/vector_decimal_round.q.out | 460 +
.../llap/vector_decimal_round_2.q.out | 500 +
.../llap/vector_decimal_trailing.q.out | 121 +
.../llap/vector_decimal_udf.q.out | 2756 +++
.../llap/vector_decimal_udf2.q.out | 188 +
.../clientpositive/llap/vector_distinct_2.q.out | 1866 ++
.../clientpositive/llap/vector_elt.q.out | 121 +
.../clientpositive/llap/vector_groupby_3.q.out | 1869 ++
.../llap/vector_groupby_reduce.q.out | 465 +
.../llap/vector_grouping_sets.q.out | 269 +
.../clientpositive/llap/vector_if_expr.q.out | 82 +
.../clientpositive/llap/vector_inner_join.q.out | 806 +
.../clientpositive/llap/vector_interval_1.q.out | 822 +
.../clientpositive/llap/vector_interval_2.q.out | 1620 ++
.../llap/vector_interval_mapjoin.q.out | 281 +
.../clientpositive/llap/vector_join30.q.out | 1375 ++
.../llap/vector_join_filters.q.out | 222 +
.../clientpositive/llap/vector_join_nulls.q.out | 195 +
.../llap/vector_left_outer_join.q.out | 141 +
.../llap/vector_left_outer_join2.q.out | 559 +
.../llap/vector_mapjoin_reduce.q.out | 327 +
.../llap/vector_mr_diff_schema_alias.q.out | 383 +
.../llap/vector_non_string_partition.q.out | 182 +
.../llap/vector_null_projection.q.out | 186 +
.../llap/vector_nullsafe_join.q.out | 1210 ++
.../clientpositive/llap/vector_orderby_5.q.out | 189 +
.../llap/vector_outer_join0.q.out | 232 +
.../llap/vector_outer_join5.q.out | 1330 ++
.../llap/vector_partition_diff_num_cols.q.out | 614 +
.../llap/vector_partitioned_date_time.q.out | 2047 +++
.../llap/vector_reduce_groupby_decimal.q.out | 205 +
.../llap/vector_string_concat.q.out | 415 +
.../clientpositive/llap/vector_varchar_4.q.out | 175 +
.../llap/vector_varchar_mapjoin1.q.out | 454 +
.../llap/vector_varchar_simple.q.out | 342 +
.../clientpositive/llap/vectorization_0.q.out | 1099 ++
.../clientpositive/llap/vectorization_1.q.out | 49 +
.../clientpositive/llap/vectorization_10.q.out | 298 +
.../clientpositive/llap/vectorization_11.q.out | 80 +
.../clientpositive/llap/vectorization_12.q.out | 602 +
.../clientpositive/llap/vectorization_13.q.out | 510 +
.../clientpositive/llap/vectorization_14.q.out | 836 +
.../clientpositive/llap/vectorization_15.q.out | 253 +
.../clientpositive/llap/vectorization_16.q.out | 671 +
.../clientpositive/llap/vectorization_17.q.out | 507 +
.../clientpositive/llap/vectorization_2.q.out | 53 +
.../clientpositive/llap/vectorization_3.q.out | 59 +
.../clientpositive/llap/vectorization_4.q.out | 53 +
.../clientpositive/llap/vectorization_5.q.out | 47 +
.../clientpositive/llap/vectorization_6.q.out | 1624 ++
.../clientpositive/llap/vectorization_7.q.out | 380 +
.../clientpositive/llap/vectorization_8.q.out | 354 +
.../clientpositive/llap/vectorization_9.q.out | 671 +
.../llap/vectorization_decimal_date.q.out | 51 +
.../llap/vectorization_div0.q.out | 485 +
.../llap/vectorization_limit.q.out | 554 +
.../llap/vectorization_nested_udf.q.out | 9 +
.../clientpositive/llap/vectorization_not.q.out | 58 +
.../llap/vectorization_part.q.out | 72 +
.../llap/vectorization_part_project.q.out | 123 +
.../llap/vectorization_pushdown.q.out | 71 +
.../llap/vectorization_short_regress.q.out | 3414 ++++
.../llap/vectorized_bucketmapjoin1.q.out | 376 +
.../clientpositive/llap/vectorized_case.q.out | 95 +
.../clientpositive/llap/vectorized_casts.q.out | 370 +
.../llap/vectorized_context.q.out | 332 +
.../llap/vectorized_date_funcs.q.out | 1019 ++
.../llap/vectorized_distinct_gby.q.out | 172 +
.../vectorized_dynamic_partition_pruning.q.out | 5352 ++++++
.../llap/vectorized_mapjoin.q.out | 114 +
.../llap/vectorized_math_funcs.q.out | 247 +
.../llap/vectorized_nested_mapjoin.q.out | 140 +
.../llap/vectorized_parquet.q.out | 325 +
.../clientpositive/llap/vectorized_ptf.q.out | 8992 ++++++++++
.../llap/vectorized_rcfile_columnar.q.out | 62 +
.../llap/vectorized_shufflejoin.q.out | 132 +
.../llap/vectorized_string_funcs.q.out | 123 +
.../llap/vectorized_timestamp_funcs.q.out | 883 +
294 files changed, 219926 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/acid_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_vectorization.q.out b/ql/src/test/results/clientpositive/llap/acid_vectorization.q.out
new file mode 100644
index 0000000..1792979
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/acid_vectorization.q.out
@@ -0,0 +1,62 @@
+PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_vectorized
+PREHOOK: query: insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_vectorized
+POSTHOOK: Lineage: acid_vectorized.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized.b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: insert into table acid_vectorized values (1, 'bar')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: insert into table acid_vectorized values (1, 'bar')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@acid_vectorized
+POSTHOOK: Lineage: acid_vectorized.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: acid_vectorized.b SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: update acid_vectorized set b = 'foo' where b = 'bar'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: update acid_vectorized set b = 'foo' where b = 'bar'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized
+POSTHOOK: Output: default@acid_vectorized
+PREHOOK: query: delete from acid_vectorized where b = 'foo'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: delete from acid_vectorized where b = 'foo'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized
+POSTHOOK: Output: default@acid_vectorized
+PREHOOK: query: select a, b from acid_vectorized order by a, b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+POSTHOOK: query: select a, b from acid_vectorized order by a, b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa
+-1073051226 A34p7oRr2WvUJNf
+-1072910839 0iqrc5
+-1072081801 dPkN74F7
+-1072076362 2uLyD28144vklju213J1mr
+-1071480828 aw724t8c5558x2xneC624
+-1071363017 Anj0oF
+-1070883071 0ruyd6Y50JpdGRf6HqD
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/acid_vectorization_project.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_vectorization_project.q.out b/ql/src/test/results/clientpositive/llap/acid_vectorization_project.q.out
new file mode 100644
index 0000000..1bdacb9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/acid_vectorization_project.q.out
@@ -0,0 +1,73 @@
+PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING, c float) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_vectorized
+PREHOOK: query: insert into table acid_vectorized select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_vectorized
+POSTHOOK: query: insert into table acid_vectorized select cint, cstring1, cfloat from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_vectorized
+POSTHOOK: Lineage: acid_vectorized.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized.b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_vectorized.c SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+PREHOOK: query: select a,b from acid_vectorized order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+POSTHOOK: query: select a,b from acid_vectorized order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+-1073279343 oj1YrV5Wa
+-1073051226 A34p7oRr2WvUJNf
+-1072910839 0iqrc5
+-1072081801 dPkN74F7
+-1072076362 2uLyD28144vklju213J1mr
+-1071480828 aw724t8c5558x2xneC624
+-1071363017 Anj0oF
+-1070883071 0ruyd6Y50JpdGRf6HqD
+-1070551679 iUR3Q
+-1069736047 k17Am8uPHWk02cEf1jet
+PREHOOK: query: select a,c from acid_vectorized order by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+POSTHOOK: query: select a,c from acid_vectorized order by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+-1073279343 11.0
+-1073051226 NULL
+-1072910839 11.0
+-1072081801 NULL
+-1072076362 NULL
+-1071480828 -51.0
+-1071363017 8.0
+-1070883071 NULL
+-1070551679 NULL
+-1069736047 11.0
+PREHOOK: query: select b,c from acid_vectorized order by b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+POSTHOOK: query: select b,c from acid_vectorized order by b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_vectorized
+#### A masked pattern was here ####
+0iqrc5 11.0
+0ruyd6Y50JpdGRf6HqD NULL
+2uLyD28144vklju213J1mr NULL
+A34p7oRr2WvUJNf NULL
+Anj0oF 8.0
+aw724t8c5558x2xneC624 -51.0
+dPkN74F7 NULL
+iUR3Q NULL
+k17Am8uPHWk02cEf1jet 11.0
+oj1YrV5Wa 11.0
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/alter_merge_2_orc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/alter_merge_2_orc.q.out b/ql/src/test/results/clientpositive/llap/alter_merge_2_orc.q.out
new file mode 100644
index 0000000..7e30942
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/alter_merge_2_orc.q.out
@@ -0,0 +1,123 @@
+PREHOOK: query: create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: query: create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc_merge_test_part
+PREHOOK: query: alter table src_orc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: query: alter table src_orc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+PREHOOK: query: desc extended src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: query: desc extended src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part
+key int
+value string
+ds string
+ts string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+ts string
+
+#### A masked pattern was here ####
+PREHOOK: query: insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src order by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+POSTHOOK: query: insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src order by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2012-01-03,ts=2012-01-03+14:46:31).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2012-01-03,ts=2012-01-03+14:46:31).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src order by key, value limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+POSTHOOK: query: insert into table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src order by key, value limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2012-01-03,ts=2012-01-03+14:46:31).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2012-01-03,ts=2012-01-03+14:46:31).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src order by key, value limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+POSTHOOK: query: insert into table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src order by key, value limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2012-01-03,ts=2012-01-03+14:46:31).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2012-01-03,ts=2012-01-03+14:46:31).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+#### A masked pattern was here ####
+610
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+#### A masked pattern was here ####
+143807 50348357904
+PREHOOK: query: alter table src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+POSTHOOK: query: alter table src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+PREHOOK: query: select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+#### A masked pattern was here ####
+610
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
+#### A masked pattern was here ####
+143807 50348357904
+PREHOOK: query: drop table src_orc_merge_test_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: query: drop table src_orc_merge_test_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Output: default@src_orc_merge_test_part
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/alter_merge_orc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/alter_merge_orc.q.out b/ql/src/test/results/clientpositive/llap/alter_merge_orc.q.out
new file mode 100644
index 0000000..b5a6d04
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/alter_merge_orc.q.out
@@ -0,0 +1,263 @@
+PREHOOK: query: create table src_orc_merge_test(key int, value string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc_merge_test
+POSTHOOK: query: create table src_orc_merge_test(key int, value string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc_merge_test
+PREHOOK: query: insert overwrite table src_orc_merge_test select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test
+POSTHOOK: query: insert overwrite table src_orc_merge_test select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test
+POSTHOOK: Lineage: src_orc_merge_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test
+POSTHOOK: query: insert into table src_orc_merge_test select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test
+POSTHOOK: Lineage: src_orc_merge_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test
+POSTHOOK: query: insert into table src_orc_merge_test select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test
+POSTHOOK: Lineage: src_orc_merge_test.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like `src_orc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:3
+totalFileSize:7545
+maxFileSize:2515
+minFileSize:2515
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from src_orc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from src_orc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+1500
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+390273 108631194210
+PREHOOK: query: alter table src_orc_merge_test concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@src_orc_merge_test
+PREHOOK: Output: default@src_orc_merge_test
+POSTHOOK: query: alter table src_orc_merge_test concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@src_orc_merge_test
+POSTHOOK: Output: default@src_orc_merge_test
+PREHOOK: query: show table extended like `src_orc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:1
+totalFileSize:7198
+maxFileSize:7198
+minFileSize:7198
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from src_orc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from src_orc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+1500
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test
+#### A masked pattern was here ####
+390273 108631194210
+PREHOOK: query: create table src_orc_merge_test_part(key int, value string) partitioned by (ds string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: query: create table src_orc_merge_test_part(key int, value string) partitioned by (ds string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc_merge_test_part
+PREHOOK: query: alter table src_orc_merge_test_part add partition (ds='2011')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: query: alter table src_orc_merge_test_part add partition (ds='2011')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2011
+PREHOOK: query: insert overwrite table src_orc_merge_test_part partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: query: insert overwrite table src_orc_merge_test_part partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_part partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: query: insert into table src_orc_merge_test_part partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_part partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: query: insert into table src_orc_merge_test_part partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like `src_orc_merge_test_part` partition (ds='2011')
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test_part` partition (ds='2011')
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test_part
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:true
+partitionColumns:struct partition_columns { string ds}
+totalNumberFiles:3
+totalFileSize:7545
+maxFileSize:2515
+minFileSize:2515
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from src_orc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from src_orc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+1500
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+390273 108631194210
+PREHOOK: query: alter table src_orc_merge_test_part partition (ds='2011') concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Output: default@src_orc_merge_test_part@ds=2011
+POSTHOOK: query: alter table src_orc_merge_test_part partition (ds='2011') concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Output: default@src_orc_merge_test_part@ds=2011
+PREHOOK: query: show table extended like `src_orc_merge_test_part` partition (ds='2011')
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test_part` partition (ds='2011')
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test_part
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:true
+partitionColumns:struct partition_columns { string ds}
+totalNumberFiles:1
+totalFileSize:7198
+maxFileSize:7198
+minFileSize:7198
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from src_orc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from src_orc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+1500
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Input: default@src_orc_merge_test_part@ds=2011
+#### A masked pattern was here ####
+390273 108631194210
+PREHOOK: query: drop table src_orc_merge_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_orc_merge_test
+PREHOOK: Output: default@src_orc_merge_test
+POSTHOOK: query: drop table src_orc_merge_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_orc_merge_test
+POSTHOOK: Output: default@src_orc_merge_test
+PREHOOK: query: drop table src_orc_merge_test_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_orc_merge_test_part
+PREHOOK: Output: default@src_orc_merge_test_part
+POSTHOOK: query: drop table src_orc_merge_test_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part
+POSTHOOK: Output: default@src_orc_merge_test_part
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/alter_merge_stats_orc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/llap/alter_merge_stats_orc.q.out
new file mode 100644
index 0000000..cefe069
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/alter_merge_stats_orc.q.out
@@ -0,0 +1,382 @@
+PREHOOK: query: create table src_orc_merge_test_stat(key int, value string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: create table src_orc_merge_test_stat(key int, value string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc_merge_test_stat
+PREHOOK: query: insert overwrite table src_orc_merge_test_stat select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: insert overwrite table src_orc_merge_test_stat select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: Lineage: src_orc_merge_test_stat.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_stat.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_stat select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: insert into table src_orc_merge_test_stat select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: Lineage: src_orc_merge_test_stat.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_stat.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_stat select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: insert into table src_orc_merge_test_stat select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: Lineage: src_orc_merge_test_stat.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_stat.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like `src_orc_merge_test_stat`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test_stat`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test_stat
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:3
+totalFileSize:7545
+maxFileSize:2515
+minFileSize:2515
+#### A masked pattern was here ####
+
+PREHOOK: query: desc extended src_orc_merge_test_stat
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: query: desc extended src_orc_merge_test_stat
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_stat
+key int
+value string
+
+#### A masked pattern was here ####
+PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_stat
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: Output: default@src_orc_merge_test_stat
+PREHOOK: query: desc formatted src_orc_merge_test_stat
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: query: desc formatted src_orc_merge_test_stat
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_stat
+# col_name data_type comment
+
+key int
+value string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 3
+ numRows 1500
+ rawDataSize 141000
+ totalSize 7545
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: alter table src_orc_merge_test_stat concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@src_orc_merge_test_stat
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: alter table src_orc_merge_test_stat concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: Output: default@src_orc_merge_test_stat
+PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_stat
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: Output: default@src_orc_merge_test_stat
+PREHOOK: query: desc formatted src_orc_merge_test_stat
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: query: desc formatted src_orc_merge_test_stat
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_stat
+# col_name data_type comment
+
+key int
+value string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 1500
+ rawDataSize 141000
+ totalSize 7198
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: create table src_orc_merge_test_part_stat(key int, value string) partitioned by (ds string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: query: create table src_orc_merge_test_part_stat(key int, value string) partitioned by (ds string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc_merge_test_part_stat
+PREHOOK: query: alter table src_orc_merge_test_part_stat add partition (ds='2011')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: query: alter table src_orc_merge_test_part_stat add partition (ds='2011')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+PREHOOK: query: insert overwrite table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: query: insert overwrite table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: query: insert into table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert into table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: query: insert into table src_orc_merge_test_part_stat partition (ds='2011') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc_merge_test_part_stat PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show table extended like `src_orc_merge_test_part_stat` partition (ds='2011')
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_orc_merge_test_part_stat` partition (ds='2011')
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_orc_merge_test_part_stat
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:true
+partitionColumns:struct partition_columns { string ds}
+totalNumberFiles:3
+totalFileSize:7545
+maxFileSize:2515
+minFileSize:2515
+#### A masked pattern was here ####
+
+PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+
+# Detailed Partition Information
+Partition Value: [2011]
+Database: default
+Table: src_orc_merge_test_part_stat
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 3
+ numRows 1500
+ rawDataSize 141000
+ totalSize 7545
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+PREHOOK: Output: default@src_orc_merge_test_part_stat
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+
+# Detailed Partition Information
+Partition Value: [2011]
+Database: default
+Table: src_orc_merge_test_part_stat
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 3
+ numRows 1500
+ rawDataSize 141000
+ totalSize 7545
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: alter table src_orc_merge_test_part_stat partition (ds='2011') concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: query: alter table src_orc_merge_test_part_stat partition (ds='2011') concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+PREHOOK: Output: default@src_orc_merge_test_part_stat
+PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
+PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+# col_name data_type comment
+
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+
+# Detailed Partition Information
+Partition Value: [2011]
+Database: default
+Table: src_orc_merge_test_part_stat
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 1500
+ rawDataSize 141000
+ totalSize 7198
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table src_orc_merge_test_stat
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_orc_merge_test_stat
+PREHOOK: Output: default@src_orc_merge_test_stat
+POSTHOOK: query: drop table src_orc_merge_test_stat
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_orc_merge_test_stat
+POSTHOOK: Output: default@src_orc_merge_test_stat
+PREHOOK: query: drop table src_orc_merge_test_part_stat
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_orc_merge_test_part_stat
+PREHOOK: Output: default@src_orc_merge_test_part_stat
+POSTHOOK: query: drop table src_orc_merge_test_part_stat
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
+POSTHOOK: Output: default@src_orc_merge_test_part_stat
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_join0.q.out b/ql/src/test/results/clientpositive/llap/auto_join0.q.out
new file mode 100644
index 0000000..f5be383
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_join0.q.out
@@ -0,0 +1,172 @@
+Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Reducer 2' is a cross product
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+select sum(hash(a.k1,a.v1,a.k2, a.v2))
+from (
+SELECT src1.key as k1, src1.value as v1,
+ src2.key as k2, src2.value as v2 FROM
+ (SELECT * FROM src WHERE src.key < 10 order by src.key, src.value) src1
+ JOIN
+ (SELECT * FROM src WHERE src.key < 10 order by src.key, src.value) src2
+ SORT BY k1, v1, k2, v2
+) a
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+select sum(hash(a.k1,a.v1,a.k2, a.v2))
+from (
+SELECT src1.key as k1, src1.value as v1,
+ src2.key as k2, src2.value as v2 FROM
+ (SELECT * FROM src WHERE src.key < 10 order by src.key, src.value) src1
+ JOIN
+ (SELECT * FROM src WHERE src.key < 10 order by src.key, src.value) src2
+ SORT BY k1, v1, k2, v2
+) a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 6 (BROADCAST_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+ Reducer 6 <- Map 5 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 10) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 10) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col2, _col3
+ input vertices:
+ 1 Reducer 6
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+ sort order: ++++
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(hash(_col0,_col1,_col2,_col3))
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 6
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Reducer 2' is a cross product
+PREHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2))
+from (
+SELECT src1.key as k1, src1.value as v1,
+ src2.key as k2, src2.value as v2 FROM
+ (SELECT * FROM src WHERE src.key < 10 order by src.key, src.value) src1
+ JOIN
+ (SELECT * FROM src WHERE src.key < 10 order by src.key, src.value) src2
+ SORT BY k1, v1, k2, v2
+) a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2))
+from (
+SELECT src1.key as k1, src1.value as v1,
+ src2.key as k2, src2.value as v2 FROM
+ (SELECT * FROM src WHERE src.key < 10 order by src.key, src.value) src1
+ JOIN
+ (SELECT * FROM src WHERE src.key < 10 order by src.key, src.value) src2
+ SORT BY k1, v1, k2, v2
+) a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+34441656720
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_join1.q.out b/ql/src/test/results/clientpositive/llap/auto_join1.q.out
new file mode 100644
index 0000000..2a771fc
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_join1.q.out
@@ -0,0 +1,125 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest_j1
+PREHOOK: query: explain
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col1, _col2
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: UDFToInteger(_col2) (type: int), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_j1
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest_j1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest_j1
+POSTHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key)
+INSERT OVERWRITE TABLE dest_j1 SELECT src1.key, src2.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest_j1
+POSTHOOK: Lineage: dest_j1.key EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT sum(hash(dest_j1.key,dest_j1.value)) FROM dest_j1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest_j1
+#### A masked pattern was here ####
+101861029915
[15/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/load_dyn_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/load_dyn_part2.q.out b/ql/src/test/results/clientpositive/llap/load_dyn_part2.q.out
new file mode 100644
index 0000000..c0ed71c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/load_dyn_part2.q.out
@@ -0,0 +1,2152 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table if not exists nzhang_part_bucket (key string, value string)
+ partitioned by (ds string, hr string)
+ clustered by (key) into 10 buckets
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part_bucket
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table if not exists nzhang_part_bucket (key string, value string)
+ partitioned by (ds string, hr string)
+ clustered by (key) into 10 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part_bucket
+PREHOOK: query: describe extended nzhang_part_bucket
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part_bucket
+POSTHOOK: query: describe extended nzhang_part_bucket
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part_bucket
+key string
+value string
+ds string
+hr string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+#### A masked pattern was here ####
+PREHOOK: query: explain
+insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), hr (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part_bucket
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds 2010-03-23
+ hr
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part_bucket
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part_bucket@ds=2010-03-23
+POSTHOOK: query: insert overwrite table nzhang_part_bucket partition (ds='2010-03-23', hr) select key, value, hr from srcpart where ds is not null and hr is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part_bucket@ds=2010-03-23/hr=11
+POSTHOOK: Output: default@nzhang_part_bucket@ds=2010-03-23/hr=12
+POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part_bucket PARTITION(ds=2010-03-23,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: show partitions nzhang_part_bucket
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@nzhang_part_bucket
+POSTHOOK: query: show partitions nzhang_part_bucket
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@nzhang_part_bucket
+ds=2010-03-23/hr=11
+ds=2010-03-23/hr=12
+PREHOOK: query: select * from nzhang_part_bucket where ds='2010-03-23' and hr='11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_part_bucket
+PREHOOK: Input: default@nzhang_part_bucket@ds=2010-03-23/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_part_bucket where ds='2010-03-23' and hr='11'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_part_bucket
+POSTHOOK: Input: default@nzhang_part_bucket@ds=2010-03-23/hr=11
+#### A masked pattern was here ####
+0 val_0 2010-03-23 11
+0 val_0 2010-03-23 11
+0 val_0 2010-03-23 11
+0 val_0 2010-03-23 11
+0 val_0 2010-03-23 11
+0 val_0 2010-03-23 11
+10 val_10 2010-03-23 11
+10 val_10 2010-03-23 11
+100 val_100 2010-03-23 11
+100 val_100 2010-03-23 11
+100 val_100 2010-03-23 11
+100 val_100 2010-03-23 11
+103 val_103 2010-03-23 11
+103 val_103 2010-03-23 11
+103 val_103 2010-03-23 11
+103 val_103 2010-03-23 11
+104 val_104 2010-03-23 11
+104 val_104 2010-03-23 11
+104 val_104 2010-03-23 11
+104 val_104 2010-03-23 11
+105 val_105 2010-03-23 11
+105 val_105 2010-03-23 11
+11 val_11 2010-03-23 11
+11 val_11 2010-03-23 11
+111 val_111 2010-03-23 11
+111 val_111 2010-03-23 11
+113 val_113 2010-03-23 11
+113 val_113 2010-03-23 11
+113 val_113 2010-03-23 11
+113 val_113 2010-03-23 11
+114 val_114 2010-03-23 11
+114 val_114 2010-03-23 11
+116 val_116 2010-03-23 11
+116 val_116 2010-03-23 11
+118 val_118 2010-03-23 11
+118 val_118 2010-03-23 11
+118 val_118 2010-03-23 11
+118 val_118 2010-03-23 11
+119 val_119 2010-03-23 11
+119 val_119 2010-03-23 11
+119 val_119 2010-03-23 11
+119 val_119 2010-03-23 11
+119 val_119 2010-03-23 11
+119 val_119 2010-03-23 11
+12 val_12 2010-03-23 11
+12 val_12 2010-03-23 11
+12 val_12 2010-03-23 11
+12 val_12 2010-03-23 11
+120 val_120 2010-03-23 11
+120 val_120 2010-03-23 11
+120 val_120 2010-03-23 11
+120 val_120 2010-03-23 11
+125 val_125 2010-03-23 11
+125 val_125 2010-03-23 11
+125 val_125 2010-03-23 11
+125 val_125 2010-03-23 11
+126 val_126 2010-03-23 11
+126 val_126 2010-03-23 11
+128 val_128 2010-03-23 11
+128 val_128 2010-03-23 11
+128 val_128 2010-03-23 11
+128 val_128 2010-03-23 11
+128 val_128 2010-03-23 11
+128 val_128 2010-03-23 11
+129 val_129 2010-03-23 11
+129 val_129 2010-03-23 11
+129 val_129 2010-03-23 11
+129 val_129 2010-03-23 11
+131 val_131 2010-03-23 11
+131 val_131 2010-03-23 11
+133 val_133 2010-03-23 11
+133 val_133 2010-03-23 11
+134 val_134 2010-03-23 11
+134 val_134 2010-03-23 11
+134 val_134 2010-03-23 11
+134 val_134 2010-03-23 11
+136 val_136 2010-03-23 11
+136 val_136 2010-03-23 11
+137 val_137 2010-03-23 11
+137 val_137 2010-03-23 11
+137 val_137 2010-03-23 11
+137 val_137 2010-03-23 11
+138 val_138 2010-03-23 11
+138 val_138 2010-03-23 11
+138 val_138 2010-03-23 11
+138 val_138 2010-03-23 11
+138 val_138 2010-03-23 11
+138 val_138 2010-03-23 11
+138 val_138 2010-03-23 11
+138 val_138 2010-03-23 11
+143 val_143 2010-03-23 11
+143 val_143 2010-03-23 11
+145 val_145 2010-03-23 11
+145 val_145 2010-03-23 11
+146 val_146 2010-03-23 11
+146 val_146 2010-03-23 11
+146 val_146 2010-03-23 11
+146 val_146 2010-03-23 11
+149 val_149 2010-03-23 11
+149 val_149 2010-03-23 11
+149 val_149 2010-03-23 11
+149 val_149 2010-03-23 11
+15 val_15 2010-03-23 11
+15 val_15 2010-03-23 11
+15 val_15 2010-03-23 11
+15 val_15 2010-03-23 11
+150 val_150 2010-03-23 11
+150 val_150 2010-03-23 11
+152 val_152 2010-03-23 11
+152 val_152 2010-03-23 11
+152 val_152 2010-03-23 11
+152 val_152 2010-03-23 11
+153 val_153 2010-03-23 11
+153 val_153 2010-03-23 11
+155 val_155 2010-03-23 11
+155 val_155 2010-03-23 11
+156 val_156 2010-03-23 11
+156 val_156 2010-03-23 11
+157 val_157 2010-03-23 11
+157 val_157 2010-03-23 11
+158 val_158 2010-03-23 11
+158 val_158 2010-03-23 11
+160 val_160 2010-03-23 11
+160 val_160 2010-03-23 11
+162 val_162 2010-03-23 11
+162 val_162 2010-03-23 11
+163 val_163 2010-03-23 11
+163 val_163 2010-03-23 11
+164 val_164 2010-03-23 11
+164 val_164 2010-03-23 11
+164 val_164 2010-03-23 11
+164 val_164 2010-03-23 11
+165 val_165 2010-03-23 11
+165 val_165 2010-03-23 11
+165 val_165 2010-03-23 11
+165 val_165 2010-03-23 11
+166 val_166 2010-03-23 11
+166 val_166 2010-03-23 11
+167 val_167 2010-03-23 11
+167 val_167 2010-03-23 11
+167 val_167 2010-03-23 11
+167 val_167 2010-03-23 11
+167 val_167 2010-03-23 11
+167 val_167 2010-03-23 11
+168 val_168 2010-03-23 11
+168 val_168 2010-03-23 11
+169 val_169 2010-03-23 11
+169 val_169 2010-03-23 11
+169 val_169 2010-03-23 11
+169 val_169 2010-03-23 11
+169 val_169 2010-03-23 11
+169 val_169 2010-03-23 11
+169 val_169 2010-03-23 11
+169 val_169 2010-03-23 11
+17 val_17 2010-03-23 11
+17 val_17 2010-03-23 11
+170 val_170 2010-03-23 11
+170 val_170 2010-03-23 11
+172 val_172 2010-03-23 11
+172 val_172 2010-03-23 11
+172 val_172 2010-03-23 11
+172 val_172 2010-03-23 11
+174 val_174 2010-03-23 11
+174 val_174 2010-03-23 11
+174 val_174 2010-03-23 11
+174 val_174 2010-03-23 11
+175 val_175 2010-03-23 11
+175 val_175 2010-03-23 11
+175 val_175 2010-03-23 11
+175 val_175 2010-03-23 11
+176 val_176 2010-03-23 11
+176 val_176 2010-03-23 11
+176 val_176 2010-03-23 11
+176 val_176 2010-03-23 11
+177 val_177 2010-03-23 11
+177 val_177 2010-03-23 11
+178 val_178 2010-03-23 11
+178 val_178 2010-03-23 11
+179 val_179 2010-03-23 11
+179 val_179 2010-03-23 11
+179 val_179 2010-03-23 11
+179 val_179 2010-03-23 11
+18 val_18 2010-03-23 11
+18 val_18 2010-03-23 11
+18 val_18 2010-03-23 11
+18 val_18 2010-03-23 11
+180 val_180 2010-03-23 11
+180 val_180 2010-03-23 11
+181 val_181 2010-03-23 11
+181 val_181 2010-03-23 11
+183 val_183 2010-03-23 11
+183 val_183 2010-03-23 11
+186 val_186 2010-03-23 11
+186 val_186 2010-03-23 11
+187 val_187 2010-03-23 11
+187 val_187 2010-03-23 11
+187 val_187 2010-03-23 11
+187 val_187 2010-03-23 11
+187 val_187 2010-03-23 11
+187 val_187 2010-03-23 11
+189 val_189 2010-03-23 11
+189 val_189 2010-03-23 11
+19 val_19 2010-03-23 11
+19 val_19 2010-03-23 11
+190 val_190 2010-03-23 11
+190 val_190 2010-03-23 11
+191 val_191 2010-03-23 11
+191 val_191 2010-03-23 11
+191 val_191 2010-03-23 11
+191 val_191 2010-03-23 11
+192 val_192 2010-03-23 11
+192 val_192 2010-03-23 11
+193 val_193 2010-03-23 11
+193 val_193 2010-03-23 11
+193 val_193 2010-03-23 11
+193 val_193 2010-03-23 11
+193 val_193 2010-03-23 11
+193 val_193 2010-03-23 11
+194 val_194 2010-03-23 11
+194 val_194 2010-03-23 11
+195 val_195 2010-03-23 11
+195 val_195 2010-03-23 11
+195 val_195 2010-03-23 11
+195 val_195 2010-03-23 11
+196 val_196 2010-03-23 11
+196 val_196 2010-03-23 11
+197 val_197 2010-03-23 11
+197 val_197 2010-03-23 11
+197 val_197 2010-03-23 11
+197 val_197 2010-03-23 11
+199 val_199 2010-03-23 11
+199 val_199 2010-03-23 11
+199 val_199 2010-03-23 11
+199 val_199 2010-03-23 11
+199 val_199 2010-03-23 11
+199 val_199 2010-03-23 11
+2 val_2 2010-03-23 11
+2 val_2 2010-03-23 11
+20 val_20 2010-03-23 11
+20 val_20 2010-03-23 11
+200 val_200 2010-03-23 11
+200 val_200 2010-03-23 11
+200 val_200 2010-03-23 11
+200 val_200 2010-03-23 11
+201 val_201 2010-03-23 11
+201 val_201 2010-03-23 11
+202 val_202 2010-03-23 11
+202 val_202 2010-03-23 11
+203 val_203 2010-03-23 11
+203 val_203 2010-03-23 11
+203 val_203 2010-03-23 11
+203 val_203 2010-03-23 11
+205 val_205 2010-03-23 11
+205 val_205 2010-03-23 11
+205 val_205 2010-03-23 11
+205 val_205 2010-03-23 11
+207 val_207 2010-03-23 11
+207 val_207 2010-03-23 11
+207 val_207 2010-03-23 11
+207 val_207 2010-03-23 11
+208 val_208 2010-03-23 11
+208 val_208 2010-03-23 11
+208 val_208 2010-03-23 11
+208 val_208 2010-03-23 11
+208 val_208 2010-03-23 11
+208 val_208 2010-03-23 11
+209 val_209 2010-03-23 11
+209 val_209 2010-03-23 11
+209 val_209 2010-03-23 11
+209 val_209 2010-03-23 11
+213 val_213 2010-03-23 11
+213 val_213 2010-03-23 11
+213 val_213 2010-03-23 11
+213 val_213 2010-03-23 11
+214 val_214 2010-03-23 11
+214 val_214 2010-03-23 11
+216 val_216 2010-03-23 11
+216 val_216 2010-03-23 11
+216 val_216 2010-03-23 11
+216 val_216 2010-03-23 11
+217 val_217 2010-03-23 11
+217 val_217 2010-03-23 11
+217 val_217 2010-03-23 11
+217 val_217 2010-03-23 11
+218 val_218 2010-03-23 11
+218 val_218 2010-03-23 11
+219 val_219 2010-03-23 11
+219 val_219 2010-03-23 11
+219 val_219 2010-03-23 11
+219 val_219 2010-03-23 11
+221 val_221 2010-03-23 11
+221 val_221 2010-03-23 11
+221 val_221 2010-03-23 11
+221 val_221 2010-03-23 11
+222 val_222 2010-03-23 11
+222 val_222 2010-03-23 11
+223 val_223 2010-03-23 11
+223 val_223 2010-03-23 11
+223 val_223 2010-03-23 11
+223 val_223 2010-03-23 11
+224 val_224 2010-03-23 11
+224 val_224 2010-03-23 11
+224 val_224 2010-03-23 11
+224 val_224 2010-03-23 11
+226 val_226 2010-03-23 11
+226 val_226 2010-03-23 11
+228 val_228 2010-03-23 11
+228 val_228 2010-03-23 11
+229 val_229 2010-03-23 11
+229 val_229 2010-03-23 11
+229 val_229 2010-03-23 11
+229 val_229 2010-03-23 11
+230 val_230 2010-03-23 11
+230 val_230 2010-03-23 11
+230 val_230 2010-03-23 11
+230 val_230 2010-03-23 11
+230 val_230 2010-03-23 11
+230 val_230 2010-03-23 11
+230 val_230 2010-03-23 11
+230 val_230 2010-03-23 11
+230 val_230 2010-03-23 11
+230 val_230 2010-03-23 11
+233 val_233 2010-03-23 11
+233 val_233 2010-03-23 11
+233 val_233 2010-03-23 11
+233 val_233 2010-03-23 11
+235 val_235 2010-03-23 11
+235 val_235 2010-03-23 11
+237 val_237 2010-03-23 11
+237 val_237 2010-03-23 11
+237 val_237 2010-03-23 11
+237 val_237 2010-03-23 11
+238 val_238 2010-03-23 11
+238 val_238 2010-03-23 11
+238 val_238 2010-03-23 11
+238 val_238 2010-03-23 11
+239 val_239 2010-03-23 11
+239 val_239 2010-03-23 11
+239 val_239 2010-03-23 11
+239 val_239 2010-03-23 11
+24 val_24 2010-03-23 11
+24 val_24 2010-03-23 11
+24 val_24 2010-03-23 11
+24 val_24 2010-03-23 11
+241 val_241 2010-03-23 11
+241 val_241 2010-03-23 11
+242 val_242 2010-03-23 11
+242 val_242 2010-03-23 11
+242 val_242 2010-03-23 11
+242 val_242 2010-03-23 11
+244 val_244 2010-03-23 11
+244 val_244 2010-03-23 11
+247 val_247 2010-03-23 11
+247 val_247 2010-03-23 11
+248 val_248 2010-03-23 11
+248 val_248 2010-03-23 11
+249 val_249 2010-03-23 11
+249 val_249 2010-03-23 11
+252 val_252 2010-03-23 11
+252 val_252 2010-03-23 11
+255 val_255 2010-03-23 11
+255 val_255 2010-03-23 11
+255 val_255 2010-03-23 11
+255 val_255 2010-03-23 11
+256 val_256 2010-03-23 11
+256 val_256 2010-03-23 11
+256 val_256 2010-03-23 11
+256 val_256 2010-03-23 11
+257 val_257 2010-03-23 11
+257 val_257 2010-03-23 11
+258 val_258 2010-03-23 11
+258 val_258 2010-03-23 11
+26 val_26 2010-03-23 11
+26 val_26 2010-03-23 11
+26 val_26 2010-03-23 11
+26 val_26 2010-03-23 11
+260 val_260 2010-03-23 11
+260 val_260 2010-03-23 11
+262 val_262 2010-03-23 11
+262 val_262 2010-03-23 11
+263 val_263 2010-03-23 11
+263 val_263 2010-03-23 11
+265 val_265 2010-03-23 11
+265 val_265 2010-03-23 11
+265 val_265 2010-03-23 11
+265 val_265 2010-03-23 11
+266 val_266 2010-03-23 11
+266 val_266 2010-03-23 11
+27 val_27 2010-03-23 11
+27 val_27 2010-03-23 11
+272 val_272 2010-03-23 11
+272 val_272 2010-03-23 11
+272 val_272 2010-03-23 11
+272 val_272 2010-03-23 11
+273 val_273 2010-03-23 11
+273 val_273 2010-03-23 11
+273 val_273 2010-03-23 11
+273 val_273 2010-03-23 11
+273 val_273 2010-03-23 11
+273 val_273 2010-03-23 11
+274 val_274 2010-03-23 11
+274 val_274 2010-03-23 11
+275 val_275 2010-03-23 11
+275 val_275 2010-03-23 11
+277 val_277 2010-03-23 11
+277 val_277 2010-03-23 11
+277 val_277 2010-03-23 11
+277 val_277 2010-03-23 11
+277 val_277 2010-03-23 11
+277 val_277 2010-03-23 11
+277 val_277 2010-03-23 11
+277 val_277 2010-03-23 11
+278 val_278 2010-03-23 11
+278 val_278 2010-03-23 11
+278 val_278 2010-03-23 11
+278 val_278 2010-03-23 11
+28 val_28 2010-03-23 11
+28 val_28 2010-03-23 11
+280 val_280 2010-03-23 11
+280 val_280 2010-03-23 11
+280 val_280 2010-03-23 11
+280 val_280 2010-03-23 11
+281 val_281 2010-03-23 11
+281 val_281 2010-03-23 11
+281 val_281 2010-03-23 11
+281 val_281 2010-03-23 11
+282 val_282 2010-03-23 11
+282 val_282 2010-03-23 11
+282 val_282 2010-03-23 11
+282 val_282 2010-03-23 11
+283 val_283 2010-03-23 11
+283 val_283 2010-03-23 11
+284 val_284 2010-03-23 11
+284 val_284 2010-03-23 11
+285 val_285 2010-03-23 11
+285 val_285 2010-03-23 11
+286 val_286 2010-03-23 11
+286 val_286 2010-03-23 11
+287 val_287 2010-03-23 11
+287 val_287 2010-03-23 11
+288 val_288 2010-03-23 11
+288 val_288 2010-03-23 11
+288 val_288 2010-03-23 11
+288 val_288 2010-03-23 11
+289 val_289 2010-03-23 11
+289 val_289 2010-03-23 11
+291 val_291 2010-03-23 11
+291 val_291 2010-03-23 11
+292 val_292 2010-03-23 11
+292 val_292 2010-03-23 11
+296 val_296 2010-03-23 11
+296 val_296 2010-03-23 11
+298 val_298 2010-03-23 11
+298 val_298 2010-03-23 11
+298 val_298 2010-03-23 11
+298 val_298 2010-03-23 11
+298 val_298 2010-03-23 11
+298 val_298 2010-03-23 11
+30 val_30 2010-03-23 11
+30 val_30 2010-03-23 11
+302 val_302 2010-03-23 11
+302 val_302 2010-03-23 11
+305 val_305 2010-03-23 11
+305 val_305 2010-03-23 11
+306 val_306 2010-03-23 11
+306 val_306 2010-03-23 11
+307 val_307 2010-03-23 11
+307 val_307 2010-03-23 11
+307 val_307 2010-03-23 11
+307 val_307 2010-03-23 11
+308 val_308 2010-03-23 11
+308 val_308 2010-03-23 11
+309 val_309 2010-03-23 11
+309 val_309 2010-03-23 11
+309 val_309 2010-03-23 11
+309 val_309 2010-03-23 11
+310 val_310 2010-03-23 11
+310 val_310 2010-03-23 11
+311 val_311 2010-03-23 11
+311 val_311 2010-03-23 11
+311 val_311 2010-03-23 11
+311 val_311 2010-03-23 11
+311 val_311 2010-03-23 11
+311 val_311 2010-03-23 11
+315 val_315 2010-03-23 11
+315 val_315 2010-03-23 11
+316 val_316 2010-03-23 11
+316 val_316 2010-03-23 11
+316 val_316 2010-03-23 11
+316 val_316 2010-03-23 11
+316 val_316 2010-03-23 11
+316 val_316 2010-03-23 11
+317 val_317 2010-03-23 11
+317 val_317 2010-03-23 11
+317 val_317 2010-03-23 11
+317 val_317 2010-03-23 11
+318 val_318 2010-03-23 11
+318 val_318 2010-03-23 11
+318 val_318 2010-03-23 11
+318 val_318 2010-03-23 11
+318 val_318 2010-03-23 11
+318 val_318 2010-03-23 11
+321 val_321 2010-03-23 11
+321 val_321 2010-03-23 11
+321 val_321 2010-03-23 11
+321 val_321 2010-03-23 11
+322 val_322 2010-03-23 11
+322 val_322 2010-03-23 11
+322 val_322 2010-03-23 11
+322 val_322 2010-03-23 11
+323 val_323 2010-03-23 11
+323 val_323 2010-03-23 11
+325 val_325 2010-03-23 11
+325 val_325 2010-03-23 11
+325 val_325 2010-03-23 11
+325 val_325 2010-03-23 11
+327 val_327 2010-03-23 11
+327 val_327 2010-03-23 11
+327 val_327 2010-03-23 11
+327 val_327 2010-03-23 11
+327 val_327 2010-03-23 11
+327 val_327 2010-03-23 11
+33 val_33 2010-03-23 11
+33 val_33 2010-03-23 11
+331 val_331 2010-03-23 11
+331 val_331 2010-03-23 11
+331 val_331 2010-03-23 11
+331 val_331 2010-03-23 11
+332 val_332 2010-03-23 11
+332 val_332 2010-03-23 11
+333 val_333 2010-03-23 11
+333 val_333 2010-03-23 11
+333 val_333 2010-03-23 11
+333 val_333 2010-03-23 11
+335 val_335 2010-03-23 11
+335 val_335 2010-03-23 11
+336 val_336 2010-03-23 11
+336 val_336 2010-03-23 11
+338 val_338 2010-03-23 11
+338 val_338 2010-03-23 11
+339 val_339 2010-03-23 11
+339 val_339 2010-03-23 11
+34 val_34 2010-03-23 11
+34 val_34 2010-03-23 11
+341 val_341 2010-03-23 11
+341 val_341 2010-03-23 11
+342 val_342 2010-03-23 11
+342 val_342 2010-03-23 11
+342 val_342 2010-03-23 11
+342 val_342 2010-03-23 11
+344 val_344 2010-03-23 11
+344 val_344 2010-03-23 11
+344 val_344 2010-03-23 11
+344 val_344 2010-03-23 11
+345 val_345 2010-03-23 11
+345 val_345 2010-03-23 11
+348 val_348 2010-03-23 11
+348 val_348 2010-03-23 11
+348 val_348 2010-03-23 11
+348 val_348 2010-03-23 11
+348 val_348 2010-03-23 11
+348 val_348 2010-03-23 11
+348 val_348 2010-03-23 11
+348 val_348 2010-03-23 11
+348 val_348 2010-03-23 11
+348 val_348 2010-03-23 11
+35 val_35 2010-03-23 11
+35 val_35 2010-03-23 11
+35 val_35 2010-03-23 11
+35 val_35 2010-03-23 11
+35 val_35 2010-03-23 11
+35 val_35 2010-03-23 11
+351 val_351 2010-03-23 11
+351 val_351 2010-03-23 11
+353 val_353 2010-03-23 11
+353 val_353 2010-03-23 11
+353 val_353 2010-03-23 11
+353 val_353 2010-03-23 11
+356 val_356 2010-03-23 11
+356 val_356 2010-03-23 11
+360 val_360 2010-03-23 11
+360 val_360 2010-03-23 11
+362 val_362 2010-03-23 11
+362 val_362 2010-03-23 11
+364 val_364 2010-03-23 11
+364 val_364 2010-03-23 11
+365 val_365 2010-03-23 11
+365 val_365 2010-03-23 11
+366 val_366 2010-03-23 11
+366 val_366 2010-03-23 11
+367 val_367 2010-03-23 11
+367 val_367 2010-03-23 11
+367 val_367 2010-03-23 11
+367 val_367 2010-03-23 11
+368 val_368 2010-03-23 11
+368 val_368 2010-03-23 11
+369 val_369 2010-03-23 11
+369 val_369 2010-03-23 11
+369 val_369 2010-03-23 11
+369 val_369 2010-03-23 11
+369 val_369 2010-03-23 11
+369 val_369 2010-03-23 11
+37 val_37 2010-03-23 11
+37 val_37 2010-03-23 11
+37 val_37 2010-03-23 11
+37 val_37 2010-03-23 11
+373 val_373 2010-03-23 11
+373 val_373 2010-03-23 11
+374 val_374 2010-03-23 11
+374 val_374 2010-03-23 11
+375 val_375 2010-03-23 11
+375 val_375 2010-03-23 11
+377 val_377 2010-03-23 11
+377 val_377 2010-03-23 11
+378 val_378 2010-03-23 11
+378 val_378 2010-03-23 11
+379 val_379 2010-03-23 11
+379 val_379 2010-03-23 11
+382 val_382 2010-03-23 11
+382 val_382 2010-03-23 11
+382 val_382 2010-03-23 11
+382 val_382 2010-03-23 11
+384 val_384 2010-03-23 11
+384 val_384 2010-03-23 11
+384 val_384 2010-03-23 11
+384 val_384 2010-03-23 11
+384 val_384 2010-03-23 11
+384 val_384 2010-03-23 11
+386 val_386 2010-03-23 11
+386 val_386 2010-03-23 11
+389 val_389 2010-03-23 11
+389 val_389 2010-03-23 11
+392 val_392 2010-03-23 11
+392 val_392 2010-03-23 11
+393 val_393 2010-03-23 11
+393 val_393 2010-03-23 11
+394 val_394 2010-03-23 11
+394 val_394 2010-03-23 11
+395 val_395 2010-03-23 11
+395 val_395 2010-03-23 11
+395 val_395 2010-03-23 11
+395 val_395 2010-03-23 11
+396 val_396 2010-03-23 11
+396 val_396 2010-03-23 11
+396 val_396 2010-03-23 11
+396 val_396 2010-03-23 11
+396 val_396 2010-03-23 11
+396 val_396 2010-03-23 11
+397 val_397 2010-03-23 11
+397 val_397 2010-03-23 11
+397 val_397 2010-03-23 11
+397 val_397 2010-03-23 11
+399 val_399 2010-03-23 11
+399 val_399 2010-03-23 11
+399 val_399 2010-03-23 11
+399 val_399 2010-03-23 11
+4 val_4 2010-03-23 11
+4 val_4 2010-03-23 11
+400 val_400 2010-03-23 11
+400 val_400 2010-03-23 11
+401 val_401 2010-03-23 11
+401 val_401 2010-03-23 11
+401 val_401 2010-03-23 11
+401 val_401 2010-03-23 11
+401 val_401 2010-03-23 11
+401 val_401 2010-03-23 11
+401 val_401 2010-03-23 11
+401 val_401 2010-03-23 11
+401 val_401 2010-03-23 11
+401 val_401 2010-03-23 11
+402 val_402 2010-03-23 11
+402 val_402 2010-03-23 11
+403 val_403 2010-03-23 11
+403 val_403 2010-03-23 11
+403 val_403 2010-03-23 11
+403 val_403 2010-03-23 11
+403 val_403 2010-03-23 11
+403 val_403 2010-03-23 11
+404 val_404 2010-03-23 11
+404 val_404 2010-03-23 11
+404 val_404 2010-03-23 11
+404 val_404 2010-03-23 11
+406 val_406 2010-03-23 11
+406 val_406 2010-03-23 11
+406 val_406 2010-03-23 11
+406 val_406 2010-03-23 11
+406 val_406 2010-03-23 11
+406 val_406 2010-03-23 11
+406 val_406 2010-03-23 11
+406 val_406 2010-03-23 11
+407 val_407 2010-03-23 11
+407 val_407 2010-03-23 11
+409 val_409 2010-03-23 11
+409 val_409 2010-03-23 11
+409 val_409 2010-03-23 11
+409 val_409 2010-03-23 11
+409 val_409 2010-03-23 11
+409 val_409 2010-03-23 11
+41 val_41 2010-03-23 11
+41 val_41 2010-03-23 11
+411 val_411 2010-03-23 11
+411 val_411 2010-03-23 11
+413 val_413 2010-03-23 11
+413 val_413 2010-03-23 11
+413 val_413 2010-03-23 11
+413 val_413 2010-03-23 11
+414 val_414 2010-03-23 11
+414 val_414 2010-03-23 11
+414 val_414 2010-03-23 11
+414 val_414 2010-03-23 11
+417 val_417 2010-03-23 11
+417 val_417 2010-03-23 11
+417 val_417 2010-03-23 11
+417 val_417 2010-03-23 11
+417 val_417 2010-03-23 11
+417 val_417 2010-03-23 11
+418 val_418 2010-03-23 11
+418 val_418 2010-03-23 11
+419 val_419 2010-03-23 11
+419 val_419 2010-03-23 11
+42 val_42 2010-03-23 11
+42 val_42 2010-03-23 11
+42 val_42 2010-03-23 11
+42 val_42 2010-03-23 11
+421 val_421 2010-03-23 11
+421 val_421 2010-03-23 11
+424 val_424 2010-03-23 11
+424 val_424 2010-03-23 11
+424 val_424 2010-03-23 11
+424 val_424 2010-03-23 11
+427 val_427 2010-03-23 11
+427 val_427 2010-03-23 11
+429 val_429 2010-03-23 11
+429 val_429 2010-03-23 11
+429 val_429 2010-03-23 11
+429 val_429 2010-03-23 11
+43 val_43 2010-03-23 11
+43 val_43 2010-03-23 11
+430 val_430 2010-03-23 11
+430 val_430 2010-03-23 11
+430 val_430 2010-03-23 11
+430 val_430 2010-03-23 11
+430 val_430 2010-03-23 11
+430 val_430 2010-03-23 11
+431 val_431 2010-03-23 11
+431 val_431 2010-03-23 11
+431 val_431 2010-03-23 11
+431 val_431 2010-03-23 11
+431 val_431 2010-03-23 11
+431 val_431 2010-03-23 11
+432 val_432 2010-03-23 11
+432 val_432 2010-03-23 11
+435 val_435 2010-03-23 11
+435 val_435 2010-03-23 11
+436 val_436 2010-03-23 11
+436 val_436 2010-03-23 11
+437 val_437 2010-03-23 11
+437 val_437 2010-03-23 11
+438 val_438 2010-03-23 11
+438 val_438 2010-03-23 11
+438 val_438 2010-03-23 11
+438 val_438 2010-03-23 11
+438 val_438 2010-03-23 11
+438 val_438 2010-03-23 11
+439 val_439 2010-03-23 11
+439 val_439 2010-03-23 11
+439 val_439 2010-03-23 11
+439 val_439 2010-03-23 11
+44 val_44 2010-03-23 11
+44 val_44 2010-03-23 11
+443 val_443 2010-03-23 11
+443 val_443 2010-03-23 11
+444 val_444 2010-03-23 11
+444 val_444 2010-03-23 11
+446 val_446 2010-03-23 11
+446 val_446 2010-03-23 11
+448 val_448 2010-03-23 11
+448 val_448 2010-03-23 11
+449 val_449 2010-03-23 11
+449 val_449 2010-03-23 11
+452 val_452 2010-03-23 11
+452 val_452 2010-03-23 11
+453 val_453 2010-03-23 11
+453 val_453 2010-03-23 11
+454 val_454 2010-03-23 11
+454 val_454 2010-03-23 11
+454 val_454 2010-03-23 11
+454 val_454 2010-03-23 11
+454 val_454 2010-03-23 11
+454 val_454 2010-03-23 11
+455 val_455 2010-03-23 11
+455 val_455 2010-03-23 11
+457 val_457 2010-03-23 11
+457 val_457 2010-03-23 11
+458 val_458 2010-03-23 11
+458 val_458 2010-03-23 11
+458 val_458 2010-03-23 11
+458 val_458 2010-03-23 11
+459 val_459 2010-03-23 11
+459 val_459 2010-03-23 11
+459 val_459 2010-03-23 11
+459 val_459 2010-03-23 11
+460 val_460 2010-03-23 11
+460 val_460 2010-03-23 11
+462 val_462 2010-03-23 11
+462 val_462 2010-03-23 11
+462 val_462 2010-03-23 11
+462 val_462 2010-03-23 11
+463 val_463 2010-03-23 11
+463 val_463 2010-03-23 11
+463 val_463 2010-03-23 11
+463 val_463 2010-03-23 11
+466 val_466 2010-03-23 11
+466 val_466 2010-03-23 11
+466 val_466 2010-03-23 11
+466 val_466 2010-03-23 11
+466 val_466 2010-03-23 11
+466 val_466 2010-03-23 11
+467 val_467 2010-03-23 11
+467 val_467 2010-03-23 11
+468 val_468 2010-03-23 11
+468 val_468 2010-03-23 11
+468 val_468 2010-03-23 11
+468 val_468 2010-03-23 11
+468 val_468 2010-03-23 11
+468 val_468 2010-03-23 11
+468 val_468 2010-03-23 11
+468 val_468 2010-03-23 11
+469 val_469 2010-03-23 11
+469 val_469 2010-03-23 11
+469 val_469 2010-03-23 11
+469 val_469 2010-03-23 11
+469 val_469 2010-03-23 11
+469 val_469 2010-03-23 11
+469 val_469 2010-03-23 11
+469 val_469 2010-03-23 11
+469 val_469 2010-03-23 11
+469 val_469 2010-03-23 11
+47 val_47 2010-03-23 11
+47 val_47 2010-03-23 11
+470 val_470 2010-03-23 11
+470 val_470 2010-03-23 11
+472 val_472 2010-03-23 11
+472 val_472 2010-03-23 11
+475 val_475 2010-03-23 11
+475 val_475 2010-03-23 11
+477 val_477 2010-03-23 11
+477 val_477 2010-03-23 11
+478 val_478 2010-03-23 11
+478 val_478 2010-03-23 11
+478 val_478 2010-03-23 11
+478 val_478 2010-03-23 11
+479 val_479 2010-03-23 11
+479 val_479 2010-03-23 11
+480 val_480 2010-03-23 11
+480 val_480 2010-03-23 11
+480 val_480 2010-03-23 11
+480 val_480 2010-03-23 11
+480 val_480 2010-03-23 11
+480 val_480 2010-03-23 11
+481 val_481 2010-03-23 11
+481 val_481 2010-03-23 11
+482 val_482 2010-03-23 11
+482 val_482 2010-03-23 11
+483 val_483 2010-03-23 11
+483 val_483 2010-03-23 11
+484 val_484 2010-03-23 11
+484 val_484 2010-03-23 11
+485 val_485 2010-03-23 11
+485 val_485 2010-03-23 11
+487 val_487 2010-03-23 11
+487 val_487 2010-03-23 11
+489 val_489 2010-03-23 11
+489 val_489 2010-03-23 11
+489 val_489 2010-03-23 11
+489 val_489 2010-03-23 11
+489 val_489 2010-03-23 11
+489 val_489 2010-03-23 11
+489 val_489 2010-03-23 11
+489 val_489 2010-03-23 11
+490 val_490 2010-03-23 11
+490 val_490 2010-03-23 11
+491 val_491 2010-03-23 11
+491 val_491 2010-03-23 11
+492 val_492 2010-03-23 11
+492 val_492 2010-03-23 11
+492 val_492 2010-03-23 11
+492 val_492 2010-03-23 11
+493 val_493 2010-03-23 11
+493 val_493 2010-03-23 11
+494 val_494 2010-03-23 11
+494 val_494 2010-03-23 11
+495 val_495 2010-03-23 11
+495 val_495 2010-03-23 11
+496 val_496 2010-03-23 11
+496 val_496 2010-03-23 11
+497 val_497 2010-03-23 11
+497 val_497 2010-03-23 11
+498 val_498 2010-03-23 11
+498 val_498 2010-03-23 11
+498 val_498 2010-03-23 11
+498 val_498 2010-03-23 11
+498 val_498 2010-03-23 11
+498 val_498 2010-03-23 11
+5 val_5 2010-03-23 11
+5 val_5 2010-03-23 11
+5 val_5 2010-03-23 11
+5 val_5 2010-03-23 11
+5 val_5 2010-03-23 11
+5 val_5 2010-03-23 11
+51 val_51 2010-03-23 11
+51 val_51 2010-03-23 11
+51 val_51 2010-03-23 11
+51 val_51 2010-03-23 11
+53 val_53 2010-03-23 11
+53 val_53 2010-03-23 11
+54 val_54 2010-03-23 11
+54 val_54 2010-03-23 11
+57 val_57 2010-03-23 11
+57 val_57 2010-03-23 11
+58 val_58 2010-03-23 11
+58 val_58 2010-03-23 11
+58 val_58 2010-03-23 11
+58 val_58 2010-03-23 11
+64 val_64 2010-03-23 11
+64 val_64 2010-03-23 11
+65 val_65 2010-03-23 11
+65 val_65 2010-03-23 11
+66 val_66 2010-03-23 11
+66 val_66 2010-03-23 11
+67 val_67 2010-03-23 11
+67 val_67 2010-03-23 11
+67 val_67 2010-03-23 11
+67 val_67 2010-03-23 11
+69 val_69 2010-03-23 11
+69 val_69 2010-03-23 11
+70 val_70 2010-03-23 11
+70 val_70 2010-03-23 11
+70 val_70 2010-03-23 11
+70 val_70 2010-03-23 11
+70 val_70 2010-03-23 11
+70 val_70 2010-03-23 11
+72 val_72 2010-03-23 11
+72 val_72 2010-03-23 11
+72 val_72 2010-03-23 11
+72 val_72 2010-03-23 11
+74 val_74 2010-03-23 11
+74 val_74 2010-03-23 11
+76 val_76 2010-03-23 11
+76 val_76 2010-03-23 11
+76 val_76 2010-03-23 11
+76 val_76 2010-03-23 11
+77 val_77 2010-03-23 11
+77 val_77 2010-03-23 11
+78 val_78 2010-03-23 11
+78 val_78 2010-03-23 11
+8 val_8 2010-03-23 11
+8 val_8 2010-03-23 11
+80 val_80 2010-03-23 11
+80 val_80 2010-03-23 11
+82 val_82 2010-03-23 11
+82 val_82 2010-03-23 11
+83 val_83 2010-03-23 11
+83 val_83 2010-03-23 11
+83 val_83 2010-03-23 11
+83 val_83 2010-03-23 11
+84 val_84 2010-03-23 11
+84 val_84 2010-03-23 11
+84 val_84 2010-03-23 11
+84 val_84 2010-03-23 11
+85 val_85 2010-03-23 11
+85 val_85 2010-03-23 11
+86 val_86 2010-03-23 11
+86 val_86 2010-03-23 11
+87 val_87 2010-03-23 11
+87 val_87 2010-03-23 11
+9 val_9 2010-03-23 11
+9 val_9 2010-03-23 11
+90 val_90 2010-03-23 11
+90 val_90 2010-03-23 11
+90 val_90 2010-03-23 11
+90 val_90 2010-03-23 11
+90 val_90 2010-03-23 11
+90 val_90 2010-03-23 11
+92 val_92 2010-03-23 11
+92 val_92 2010-03-23 11
+95 val_95 2010-03-23 11
+95 val_95 2010-03-23 11
+95 val_95 2010-03-23 11
+95 val_95 2010-03-23 11
+96 val_96 2010-03-23 11
+96 val_96 2010-03-23 11
+97 val_97 2010-03-23 11
+97 val_97 2010-03-23 11
+97 val_97 2010-03-23 11
+97 val_97 2010-03-23 11
+98 val_98 2010-03-23 11
+98 val_98 2010-03-23 11
+98 val_98 2010-03-23 11
+98 val_98 2010-03-23 11
+PREHOOK: query: select * from nzhang_part_bucket where ds='2010-03-23' and hr='12'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_part_bucket
+PREHOOK: Input: default@nzhang_part_bucket@ds=2010-03-23/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_part_bucket where ds='2010-03-23' and hr='12'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_part_bucket
+POSTHOOK: Input: default@nzhang_part_bucket@ds=2010-03-23/hr=12
+#### A masked pattern was here ####
+0 val_0 2010-03-23 12
+0 val_0 2010-03-23 12
+0 val_0 2010-03-23 12
+0 val_0 2010-03-23 12
+0 val_0 2010-03-23 12
+0 val_0 2010-03-23 12
+10 val_10 2010-03-23 12
+10 val_10 2010-03-23 12
+100 val_100 2010-03-23 12
+100 val_100 2010-03-23 12
+100 val_100 2010-03-23 12
+100 val_100 2010-03-23 12
+103 val_103 2010-03-23 12
+103 val_103 2010-03-23 12
+103 val_103 2010-03-23 12
+103 val_103 2010-03-23 12
+104 val_104 2010-03-23 12
+104 val_104 2010-03-23 12
+104 val_104 2010-03-23 12
+104 val_104 2010-03-23 12
+105 val_105 2010-03-23 12
+105 val_105 2010-03-23 12
+11 val_11 2010-03-23 12
+11 val_11 2010-03-23 12
+111 val_111 2010-03-23 12
+111 val_111 2010-03-23 12
+113 val_113 2010-03-23 12
+113 val_113 2010-03-23 12
+113 val_113 2010-03-23 12
+113 val_113 2010-03-23 12
+114 val_114 2010-03-23 12
+114 val_114 2010-03-23 12
+116 val_116 2010-03-23 12
+116 val_116 2010-03-23 12
+118 val_118 2010-03-23 12
+118 val_118 2010-03-23 12
+118 val_118 2010-03-23 12
+118 val_118 2010-03-23 12
+119 val_119 2010-03-23 12
+119 val_119 2010-03-23 12
+119 val_119 2010-03-23 12
+119 val_119 2010-03-23 12
+119 val_119 2010-03-23 12
+119 val_119 2010-03-23 12
+12 val_12 2010-03-23 12
+12 val_12 2010-03-23 12
+12 val_12 2010-03-23 12
+12 val_12 2010-03-23 12
+120 val_120 2010-03-23 12
+120 val_120 2010-03-23 12
+120 val_120 2010-03-23 12
+120 val_120 2010-03-23 12
+125 val_125 2010-03-23 12
+125 val_125 2010-03-23 12
+125 val_125 2010-03-23 12
+125 val_125 2010-03-23 12
+126 val_126 2010-03-23 12
+126 val_126 2010-03-23 12
+128 val_128 2010-03-23 12
+128 val_128 2010-03-23 12
+128 val_128 2010-03-23 12
+128 val_128 2010-03-23 12
+128 val_128 2010-03-23 12
+128 val_128 2010-03-23 12
+129 val_129 2010-03-23 12
+129 val_129 2010-03-23 12
+129 val_129 2010-03-23 12
+129 val_129 2010-03-23 12
+131 val_131 2010-03-23 12
+131 val_131 2010-03-23 12
+133 val_133 2010-03-23 12
+133 val_133 2010-03-23 12
+134 val_134 2010-03-23 12
+134 val_134 2010-03-23 12
+134 val_134 2010-03-23 12
+134 val_134 2010-03-23 12
+136 val_136 2010-03-23 12
+136 val_136 2010-03-23 12
+137 val_137 2010-03-23 12
+137 val_137 2010-03-23 12
+137 val_137 2010-03-23 12
+137 val_137 2010-03-23 12
+138 val_138 2010-03-23 12
+138 val_138 2010-03-23 12
+138 val_138 2010-03-23 12
+138 val_138 2010-03-23 12
+138 val_138 2010-03-23 12
+138 val_138 2010-03-23 12
+138 val_138 2010-03-23 12
+138 val_138 2010-03-23 12
+143 val_143 2010-03-23 12
+143 val_143 2010-03-23 12
+145 val_145 2010-03-23 12
+145 val_145 2010-03-23 12
+146 val_146 2010-03-23 12
+146 val_146 2010-03-23 12
+146 val_146 2010-03-23 12
+146 val_146 2010-03-23 12
+149 val_149 2010-03-23 12
+149 val_149 2010-03-23 12
+149 val_149 2010-03-23 12
+149 val_149 2010-03-23 12
+15 val_15 2010-03-23 12
+15 val_15 2010-03-23 12
+15 val_15 2010-03-23 12
+15 val_15 2010-03-23 12
+150 val_150 2010-03-23 12
+150 val_150 2010-03-23 12
+152 val_152 2010-03-23 12
+152 val_152 2010-03-23 12
+152 val_152 2010-03-23 12
+152 val_152 2010-03-23 12
+153 val_153 2010-03-23 12
+153 val_153 2010-03-23 12
+155 val_155 2010-03-23 12
+155 val_155 2010-03-23 12
+156 val_156 2010-03-23 12
+156 val_156 2010-03-23 12
+157 val_157 2010-03-23 12
+157 val_157 2010-03-23 12
+158 val_158 2010-03-23 12
+158 val_158 2010-03-23 12
+160 val_160 2010-03-23 12
+160 val_160 2010-03-23 12
+162 val_162 2010-03-23 12
+162 val_162 2010-03-23 12
+163 val_163 2010-03-23 12
+163 val_163 2010-03-23 12
+164 val_164 2010-03-23 12
+164 val_164 2010-03-23 12
+164 val_164 2010-03-23 12
+164 val_164 2010-03-23 12
+165 val_165 2010-03-23 12
+165 val_165 2010-03-23 12
+165 val_165 2010-03-23 12
+165 val_165 2010-03-23 12
+166 val_166 2010-03-23 12
+166 val_166 2010-03-23 12
+167 val_167 2010-03-23 12
+167 val_167 2010-03-23 12
+167 val_167 2010-03-23 12
+167 val_167 2010-03-23 12
+167 val_167 2010-03-23 12
+167 val_167 2010-03-23 12
+168 val_168 2010-03-23 12
+168 val_168 2010-03-23 12
+169 val_169 2010-03-23 12
+169 val_169 2010-03-23 12
+169 val_169 2010-03-23 12
+169 val_169 2010-03-23 12
+169 val_169 2010-03-23 12
+169 val_169 2010-03-23 12
+169 val_169 2010-03-23 12
+169 val_169 2010-03-23 12
+17 val_17 2010-03-23 12
+17 val_17 2010-03-23 12
+170 val_170 2010-03-23 12
+170 val_170 2010-03-23 12
+172 val_172 2010-03-23 12
+172 val_172 2010-03-23 12
+172 val_172 2010-03-23 12
+172 val_172 2010-03-23 12
+174 val_174 2010-03-23 12
+174 val_174 2010-03-23 12
+174 val_174 2010-03-23 12
+174 val_174 2010-03-23 12
+175 val_175 2010-03-23 12
+175 val_175 2010-03-23 12
+175 val_175 2010-03-23 12
+175 val_175 2010-03-23 12
+176 val_176 2010-03-23 12
+176 val_176 2010-03-23 12
+176 val_176 2010-03-23 12
+176 val_176 2010-03-23 12
+177 val_177 2010-03-23 12
+177 val_177 2010-03-23 12
+178 val_178 2010-03-23 12
+178 val_178 2010-03-23 12
+179 val_179 2010-03-23 12
+179 val_179 2010-03-23 12
+179 val_179 2010-03-23 12
+179 val_179 2010-03-23 12
+18 val_18 2010-03-23 12
+18 val_18 2010-03-23 12
+18 val_18 2010-03-23 12
+18 val_18 2010-03-23 12
+180 val_180 2010-03-23 12
+180 val_180 2010-03-23 12
+181 val_181 2010-03-23 12
+181 val_181 2010-03-23 12
+183 val_183 2010-03-23 12
+183 val_183 2010-03-23 12
+186 val_186 2010-03-23 12
+186 val_186 2010-03-23 12
+187 val_187 2010-03-23 12
+187 val_187 2010-03-23 12
+187 val_187 2010-03-23 12
+187 val_187 2010-03-23 12
+187 val_187 2010-03-23 12
+187 val_187 2010-03-23 12
+189 val_189 2010-03-23 12
+189 val_189 2010-03-23 12
+19 val_19 2010-03-23 12
+19 val_19 2010-03-23 12
+190 val_190 2010-03-23 12
+190 val_190 2010-03-23 12
+191 val_191 2010-03-23 12
+191 val_191 2010-03-23 12
+191 val_191 2010-03-23 12
+191 val_191 2010-03-23 12
+192 val_192 2010-03-23 12
+192 val_192 2010-03-23 12
+193 val_193 2010-03-23 12
+193 val_193 2010-03-23 12
+193 val_193 2010-03-23 12
+193 val_193 2010-03-23 12
+193 val_193 2010-03-23 12
+193 val_193 2010-03-23 12
+194 val_194 2010-03-23 12
+194 val_194 2010-03-23 12
+195 val_195 2010-03-23 12
+195 val_195 2010-03-23 12
+195 val_195 2010-03-23 12
+195 val_195 2010-03-23 12
+196 val_196 2010-03-23 12
+196 val_196 2010-03-23 12
+197 val_197 2010-03-23 12
+197 val_197 2010-03-23 12
+197 val_197 2010-03-23 12
+197 val_197 2010-03-23 12
+199 val_199 2010-03-23 12
+199 val_199 2010-03-23 12
+199 val_199 2010-03-23 12
+199 val_199 2010-03-23 12
+199 val_199 2010-03-23 12
+199 val_199 2010-03-23 12
+2 val_2 2010-03-23 12
+2 val_2 2010-03-23 12
+20 val_20 2010-03-23 12
+20 val_20 2010-03-23 12
+200 val_200 2010-03-23 12
+200 val_200 2010-03-23 12
+200 val_200 2010-03-23 12
+200 val_200 2010-03-23 12
+201 val_201 2010-03-23 12
+201 val_201 2010-03-23 12
+202 val_202 2010-03-23 12
+202 val_202 2010-03-23 12
+203 val_203 2010-03-23 12
+203 val_203 2010-03-23 12
+203 val_203 2010-03-23 12
+203 val_203 2010-03-23 12
+205 val_205 2010-03-23 12
+205 val_205 2010-03-23 12
+205 val_205 2010-03-23 12
+205 val_205 2010-03-23 12
+207 val_207 2010-03-23 12
+207 val_207 2010-03-23 12
+207 val_207 2010-03-23 12
+207 val_207 2010-03-23 12
+208 val_208 2010-03-23 12
+208 val_208 2010-03-23 12
+208 val_208 2010-03-23 12
+208 val_208 2010-03-23 12
+208 val_208 2010-03-23 12
+208 val_208 2010-03-23 12
+209 val_209 2010-03-23 12
+209 val_209 2010-03-23 12
+209 val_209 2010-03-23 12
+209 val_209 2010-03-23 12
+213 val_213 2010-03-23 12
+213 val_213 2010-03-23 12
+213 val_213 2010-03-23 12
+213 val_213 2010-03-23 12
+214 val_214 2010-03-23 12
+214 val_214 2010-03-23 12
+216 val_216 2010-03-23 12
+216 val_216 2010-03-23 12
+216 val_216 2010-03-23 12
+216 val_216 2010-03-23 12
+217 val_217 2010-03-23 12
+217 val_217 2010-03-23 12
+217 val_217 2010-03-23 12
+217 val_217 2010-03-23 12
+218 val_218 2010-03-23 12
+218 val_218 2010-03-23 12
+219 val_219 2010-03-23 12
+219 val_219 2010-03-23 12
+219 val_219 2010-03-23 12
+219 val_219 2010-03-23 12
+221 val_221 2010-03-23 12
+221 val_221 2010-03-23 12
+221 val_221 2010-03-23 12
+221 val_221 2010-03-23 12
+222 val_222 2010-03-23 12
+222 val_222 2010-03-23 12
+223 val_223 2010-03-23 12
+223 val_223 2010-03-23 12
+223 val_223 2010-03-23 12
+223 val_223 2010-03-23 12
+224 val_224 2010-03-23 12
+224 val_224 2010-03-23 12
+224 val_224 2010-03-23 12
+224 val_224 2010-03-23 12
+226 val_226 2010-03-23 12
+226 val_226 2010-03-23 12
+228 val_228 2010-03-23 12
+228 val_228 2010-03-23 12
+229 val_229 2010-03-23 12
+229 val_229 2010-03-23 12
+229 val_229 2010-03-23 12
+229 val_229 2010-03-23 12
+230 val_230 2010-03-23 12
+230 val_230 2010-03-23 12
+230 val_230 2010-03-23 12
+230 val_230 2010-03-23 12
+230 val_230 2010-03-23 12
+230 val_230 2010-03-23 12
+230 val_230 2010-03-23 12
+230 val_230 2010-03-23 12
+230 val_230 2010-03-23 12
+230 val_230 2010-03-23 12
+233 val_233 2010-03-23 12
+233 val_233 2010-03-23 12
+233 val_233 2010-03-23 12
+233 val_233 2010-03-23 12
+235 val_235 2010-03-23 12
+235 val_235 2010-03-23 12
+237 val_237 2010-03-23 12
+237 val_237 2010-03-23 12
+237 val_237 2010-03-23 12
+237 val_237 2010-03-23 12
+238 val_238 2010-03-23 12
+238 val_238 2010-03-23 12
+238 val_238 2010-03-23 12
+238 val_238 2010-03-23 12
+239 val_239 2010-03-23 12
+239 val_239 2010-03-23 12
+239 val_239 2010-03-23 12
+239 val_239 2010-03-23 12
+24 val_24 2010-03-23 12
+24 val_24 2010-03-23 12
+24 val_24 2010-03-23 12
+24 val_24 2010-03-23 12
+241 val_241 2010-03-23 12
+241 val_241 2010-03-23 12
+242 val_242 2010-03-23 12
+242 val_242 2010-03-23 12
+242 val_242 2010-03-23 12
+242 val_242 2010-03-23 12
+244 val_244 2010-03-23 12
+244 val_244 2010-03-23 12
+247 val_247 2010-03-23 12
+247 val_247 2010-03-23 12
+248 val_248 2010-03-23 12
+248 val_248 2010-03-23 12
+249 val_249 2010-03-23 12
+249 val_249 2010-03-23 12
+252 val_252 2010-03-23 12
+252 val_252 2010-03-23 12
+255 val_255 2010-03-23 12
+255 val_255 2010-03-23 12
+255 val_255 2010-03-23 12
+255 val_255 2010-03-23 12
+256 val_256 2010-03-23 12
+256 val_256 2010-03-23 12
+256 val_256 2010-03-23 12
+256 val_256 2010-03-23 12
+257 val_257 2010-03-23 12
+257 val_257 2010-03-23 12
+258 val_258 2010-03-23 12
+258 val_258 2010-03-23 12
+26 val_26 2010-03-23 12
+26 val_26 2010-03-23 12
+26 val_26 2010-03-23 12
+26 val_26 2010-03-23 12
+260 val_260 2010-03-23 12
+260 val_260 2010-03-23 12
+262 val_262 2010-03-23 12
+262 val_262 2010-03-23 12
+263 val_263 2010-03-23 12
+263 val_263 2010-03-23 12
+265 val_265 2010-03-23 12
+265 val_265 2010-03-23 12
+265 val_265 2010-03-23 12
+265 val_265 2010-03-23 12
+266 val_266 2010-03-23 12
+266 val_266 2010-03-23 12
+27 val_27 2010-03-23 12
+27 val_27 2010-03-23 12
+272 val_272 2010-03-23 12
+272 val_272 2010-03-23 12
+272 val_272 2010-03-23 12
+272 val_272 2010-03-23 12
+273 val_273 2010-03-23 12
+273 val_273 2010-03-23 12
+273 val_273 2010-03-23 12
+273 val_273 2010-03-23 12
+273 val_273 2010-03-23 12
+273 val_273 2010-03-23 12
+274 val_274 2010-03-23 12
+274 val_274 2010-03-23 12
+275 val_275 2010-03-23 12
+275 val_275 2010-03-23 12
+277 val_277 2010-03-23 12
+277 val_277 2010-03-23 12
+277 val_277 2010-03-23 12
+277 val_277 2010-03-23 12
+277 val_277 2010-03-23 12
+277 val_277 2010-03-23 12
+277 val_277 2010-03-23 12
+277 val_277 2010-03-23 12
+278 val_278 2010-03-23 12
+278 val_278 2010-03-23 12
+278 val_278 2010-03-23 12
+278 val_278 2010-03-23 12
+28 val_28 2010-03-23 12
+28 val_28 2010-03-23 12
+280 val_280 2010-03-23 12
+280 val_280 2010-03-23 12
+280 val_280 2010-03-23 12
+280 val_280 2010-03-23 12
+281 val_281 2010-03-23 12
+281 val_281 2010-03-23 12
+281 val_281 2010-03-23 12
+281 val_281 2010-03-23 12
+282 val_282 2010-03-23 12
+282 val_282 2010-03-23 12
+282 val_282 2010-03-23 12
+282 val_282 2010-03-23 12
+283 val_283 2010-03-23 12
+283 val_283 2010-03-23 12
+284 val_284 2010-03-23 12
+284 val_284 2010-03-23 12
+285 val_285 2010-03-23 12
+285 val_285 2010-03-23 12
+286 val_286 2010-03-23 12
+286 val_286 2010-03-23 12
+287 val_287 2010-03-23 12
+287 val_287 2010-03-23 12
+288 val_288 2010-03-23 12
+288 val_288 2010-03-23 12
+288 val_288 2010-03-23 12
+288 val_288 2010-03-23 12
+289 val_289 2010-03-23 12
+289 val_289 2010-03-23 12
+291 val_291 2010-03-23 12
+291 val_291 2010-03-23 12
+292 val_292 2010-03-23 12
+292 val_292 2010-03-23 12
+296 val_296 2010-03-23 12
+296 val_296 2010-03-23 12
+298 val_298 2010-03-23 12
+298 val_298 2010-03-23 12
+298 val_298 2010-03-23 12
+298 val_298 2010-03-23 12
+298 val_298 2010-03-23 12
+298 val_298 2010-03-23 12
+30 val_30 2010-03-23 12
+30 val_30 2010-03-23 12
+302 val_302 2010-03-23 12
+302 val_302 2010-03-23 12
+305 val_305 2010-03-23 12
+305 val_305 2010-03-23 12
+306 val_306 2010-03-23 12
+306 val_306 2010-03-23 12
+307 val_307 2010-03-23 12
+307 val_307 2010-03-23 12
+307 val_307 2010-03-23 12
+307 val_307 2010-03-23 12
+308 val_308 2010-03-23 12
+308 val_308 2010-03-23 12
+309 val_309 2010-03-23 12
+309 val_309 2010-03-23 12
+309 val_309 2010-03-23 12
+309 val_309 2010-03-23 12
+310 val_310 2010-03-23 12
+310 val_310 2010-03-23 12
+311 val_311 2010-03-23 12
+311 val_311 2010-03-23 12
+311 val_311 2010-03-23 12
+311 val_311 2010-03-23 12
+311 val_311 2010-03-23 12
+311 val_311 2010-03-23 12
+315 val_315 2010-03-23 12
+315 val_315 2010-03-23 12
+316 val_316 2010-03-23 12
+316 val_316 2010-03-23 12
+316 val_316 2010-03-23 12
+316 val_316 2010-03-23 12
+316 val_316 2010-03-23 12
+316 val_316 2010-03-23 12
+317 val_317 2010-03-23 12
+317 val_317 2010-03-23 12
+317 val_317 2010-03-23 12
+317 val_317 2010-03-23 12
+318 val_318 2010-03-23 12
+318 val_318 2010-03-23 12
+318 val_318 2010-03-23 12
+318 val_318 2010-03-23 12
+318 val_318 2010-03-23 12
+318 val_318 2010-03-23 12
+321 val_321 2010-03-23 12
+321 val_321 2010-03-23 12
+321 val_321 2010-03-23 12
+321 val_321 2010-03-23 12
+322 val_322 2010-03-23 12
+322 val_322 2010-03-23 12
+322 val_322 2010-03-23 12
+322 val_322 2010-03-23 12
+323 val_323 2010-03-23 12
+323 val_323 2010-03-23 12
+325 val_325 2010-03-23 12
+325 val_325 2010-03-23 12
+325 val_325 2010-03-23 12
+325 val_325 2010-03-23 12
+327 val_327 2010-03-23 12
+327 val_327 2010-03-23 12
+327 val_327 2010-03-23 12
+327 val_327 2010-03-23 12
+327 val_327 2010-03-23 12
+327 val_327 2010-03-23 12
+33 val_33 2010-03-23 12
+33 val_33 2010-03-23 12
+331 val_331 2010-03-23 12
+331 val_331 2010-03-23 12
+331 val_331 2010-03-23 12
+331 val_331 2010-03-23 12
+332 val_332 2010-03-23 12
+332 val_332 2010-03-23 12
+333 val_333 2010-03-23 12
+333 val_333 2010-03-23 12
+333 val_333 2010-03-23 12
+333 val_333 2010-03-23 12
+335 val_335 2010-03-23 12
+335 val_335 2010-03-23 12
+336 val_336 2010-03-23 12
+336 val_336 2010-03-23 12
+338 val_338 2010-03-23 12
+338 val_338 2010-03-23 12
+339 val_339 2010-03-23 12
+339 val_339 2010-03-23 12
+34 val_34 2010-03-23 12
+34 val_34 2010-03-23 12
+341 val_341 2010-03-23 12
+341 val_341 2010-03-23 12
+342 val_342 2010-03-23 12
+342 val_342 2010-03-23 12
+342 val_342 2010-03-23 12
+342 val_342 2010-03-23 12
+344 val_344 2010-03-23 12
+344 val_344 2010-03-23 12
+344 val_344 2010-03-23 12
+344 val_344 2010-03-23 12
+345 val_345 2010-03-23 12
+345 val_345 2010-03-23 12
+348 val_348 2010-03-23 12
+348 val_348 2010-03-23 12
+348 val_348 2010-03-23 12
+348 val_348 2010-03-23 12
+348 val_348 2010-03-23 12
+348 val_348 2010-03-23 12
+348 val_348 2010-03-23 12
+348 val_348 2010-03-23 12
+348 val_348 2010-03-23 12
+348 val_348 2010-03-23 12
+35 val_35 2010-03-23 12
+35 val_35 2010-03-23 12
+35 val_35 2010-03-23 12
+35 val_35 2010-03-23 12
+35 val_35 2010-03-23 12
+35 val_35 2010-03-23 12
+351 val_351 2010-03-23 12
+351 val_351 2010-03-23 12
+353 val_353 2010-03-23 12
+353 val_353 2010-03-23 12
+353 val_353 2010-03-23 12
+353 val_353 2010-03-23 12
+356 val_356 2010-03-23 12
+356 val_356 2010-03-23 12
+360 val_360 2010-03-23 12
+360 val_360 2010-03-23 12
+362 val_362 2010-03-23 12
+362 val_362 2010-03-23 12
+364 val_364 2010-03-23 12
+364 val_364 2010-03-23 12
+365 val_365 2010-03-23 12
+365 val_365 2010-03-23 12
+366 val_366 2010-03-23 12
+366 val_366 2010-03-23 12
+367 val_367 2010-03-23 12
+367 val_367 2010-03-23 12
+367 val_367 2010-03-23 12
+367 val_367 2010-03-23 12
+368 val_368 2010-03-23 12
+368 val_368 2010-03-23 12
+369 val_369 2010-03-23 12
+369 val_369 2010-03-23 12
+369 val_369 2010-03-23 12
+369 val_369 2010-03-23 12
+369 val_369 2010-03-23 12
+369 val_369 2010-03-23 12
+37 val_37 2010-03-23 12
+37 val_37 2010-03-23 12
+37 val_37 2010-03-23 12
+37 val_37 2010-03-23 12
+373 val_373 2010-03-23 12
+373 val_373 2010-03-23 12
+374 val_374 2010-03-23 12
+374 val_374 2010-03-23 12
+375 val_375 2010-03-23 12
+375 val_375 2010-03-23 12
+377 val_377 2010-03-23 12
+377 val_377 2010-03-23 12
+378 val_378 2010-03-23 12
+378 val_378 2010-03-23 12
+379 val_379 2010-03-23 12
+379 val_379 2010-03-23 12
+382 val_382 2010-03-23 12
+382 val_382 2010-03-23 12
+382 val_382 2010-03-23 12
+382 val_382 2010-03-23 12
+384 val_384 2010-03-23 12
+384 val_384 2010-03-23 12
+384 val_384 2010-03-23 12
+384 val_384 2010-03-23 12
+384 val_384 2010-03-23 12
+384 val_384 2010-03-23 12
+386 val_386 2010-03-23 12
+386 val_386 2010-03-23 12
+389 val_389 2010-03-23 12
+389 val_389 2010-03-23 12
+392 val_392 2010-03-23 12
+392 val_392 2010-03-23 12
+393 val_393 2010-03-23 12
+393 val_393 2010-03-23 12
+394 val_394 2010-03-23 12
+394 val_394 2010-03-23 12
+395 val_395 2010-03-23 12
+395 val_395 2010-03-23 12
+395 val_395 2010-03-23 12
+395 val_395 2010-03-23 12
+396 val_396 2010-03-23 12
+396 val_396 2010-03-23 12
+396 val_396 2010-03-23 12
+396 val_396 2010-03-23 12
+396 val_396 2010-03-23 12
+396 val_396 2010-03-23 12
+397 val_397 2010-03-23 12
+397 val_397 2010-03-23 12
+397 val_397 2010-03-23 12
+397 val_397 2010-03-23 12
+399 val_399 2010-03-23 12
+399 val_399 2010-03-23 12
+399 val_399 2010-03-23 12
+399 val_399 2010-03-23 12
+4 val_4 2010-03-23 12
+4 val_4 2010-03-23 12
+400 val_400 2010-03-23 12
+400 val_400 2010-03-23 12
+401 val_401 2010-03-23 12
+401 val_401 2010-03-23 12
+401 val_401 2010-03-23 12
+401 val_401 2010-03-23 12
+401 val_401 2010-03-23 12
+401 val_401 2010-03-23 12
+401 val_401 2010-03-23 12
+401 val_401 2010-03-23 12
+401 val_401 2010-03-23 12
+401 val_401 2010-03-23 12
+402 val_402 2010-03-23 12
+402 val_402 2010-03-23 12
+403 val_403 2010-03-23 12
+403 val_403 2010-03-23 12
+403 val_403 2010-03-23 12
+403 val_403 2010-03-23 12
+403 val_403 2010-03-23 12
+403 val_403 2010-03-23 12
+404 val_404 2010-03-23 12
+404 val_404 2010-03-23 12
+404 val_404 2010-03-23 12
+404 val_404 2010-03-23 12
+406 val_406 2010-03-23 12
+406 val_406 2010-03-23 12
+406 val_406 2010-03-23 12
+406 val_406 2010-03-23 12
+406 val_406 2010-03-23 12
+406 val_406 2010-03-23 12
+406 val_406 2010-03-23 12
+406 val_406 2010-03-23 12
+407 val_407 2010-03-23 12
+407 val_407 2010-03-23 12
+409 val_409 2010-03-23 12
+409 val_409 2010-03-23 12
+409 val_409 2010-03-23 12
+409 val_409 2010-03-23 12
+409 val_409 2010-03-23 12
+409 val_409 2010-03-23 12
+41 val_41 2010-03-23 12
+41 val_41 2010-03-23 12
+411 val_411 2010-03-23 12
+411 val_411 2010-03-23 12
+413 val_413 2010-03-23 12
+413 val_413 2010-03-23 12
+413 val_413 2010-03-23 12
+413 val_413 2010-03-23 12
+414 val_414 2010-03-23 12
+414 val_414 2010-03-23 12
+414 val_414 2010-03-23 12
+414 val_414 2010-03-23 12
+417 val_417 2010-03-23 12
+417 val_417 2010-03-23 12
+417 val_417 2010-03-23 12
+417 val_417 2010-03-23 12
+417 val_417 2010-03-23 12
+417 val_417 2010-03-23 12
+418 val_418 2010-03-23 12
+418 val_418 2010-03-23 12
+419 val_419 2010-03-23 12
+419 val_419 2010-03-23 12
+42 val_42 2010-03-23 12
+42 val_42 2010-03-23 12
+42 val_42 2010-03-23 12
+42 val_42 2010-03-23 12
+421 val_421 2010-03-23 12
+421 val_421 2010-03-23 12
+424 val_424 2010-03-23 12
+424 val_424 2010-03-23 12
+424 val_424 2010-03-23 12
+424 val_424 2010-03-23 12
+427 val_427 2010-03-23 12
+427 val_427 2010-03-23 12
+429 val_429 2010-03-23 12
+429 val_429 2010-03-23 12
+429 val_429 2010-03-23 12
+429 val_429 2010-03-23 12
+43 val_43 2010-03-23 12
+43 val_43 2010-03-23 12
+430 val_430 2010-03-23 12
+430 val_430 2010-03-23 12
+430 val_430 2010-03-23 12
+430 val_430 2010-03-23 12
+430 val_430 2010-03-23 12
+430 val_430 2010-03-23 12
+431 val_431 2010-03-23 12
+431 val_431 2010-03-23 12
+431 val_431 2010-03-23 12
+431 val_431 2010-03-23 12
+431 val_431 2010-03-23 12
+431 val_431 2010-03-23 12
+432 val_432 2010-03-23 12
+432 val_432 2010-03-23 12
+435 val_435 2010-03-23 12
+435 val_435 2010-03-23 12
+436 val_436 2010-03-23 12
+436 val_436 2010-03-23 12
+437 val_437 2010-03-23 12
+437 val_437 2010-03-23 12
+438 val_438 2010-03-23 12
+438 val_438 2010-03-23 12
+438 val_438 2010-03-23 12
+438 val_438 2010-03-23 12
+438 val_438 2010-03-23 12
+438 val_438 2010-03-23 12
+439 val_439 2010-03-23 12
+439 val_439 2010-03-23 12
+439 val_439 2010-03-23 12
+439 val_439 2010-03-23 12
+44 val_44 2010-03-23 12
+44 val_44 2010-03-23 12
+443 val_443 2010-03-23 12
+443 val_443 2010-03-23 12
+444 val_444 2010-03-23 12
+444 val_444 2010-03-23 12
+446 val_446 2010-03-23 12
+446 val_446 2010-03-23 12
+448 val_448 2010-03-23 12
+448 val_448 2010-03-23 12
+449 val_449 2010-03-23 12
+449 val_449 2010-03-23 12
+452 val_452 2010-03-23 12
+452 val_452 2010-03-23 12
+453 val_453 2010-03-23 12
+453 val_453 2010-03-23 12
+454 val_454 2010-03-23 12
+454 val_454 2010-03-23 12
+454 val_454 2010-03-23 12
+454 val_454 2010-03-23 12
+454 val_454 2010-03-23 12
+454 val_454 2010-03-23 12
+455 val_455 2010-03-23 12
+455 val_455 2010-03-23 12
+457 val_457 2010-03-23 12
+457 val_457 2010-03-23 12
+458 val_458 2010-03-23 12
+458 val_458 2010-03-23 12
+458 val_458 2010-03-23 12
+458 val_458 2010-03-23 12
+459 val_459 2010-03-23 12
+459 val_459 2010-03-23 12
+459 val_459 2010-03-23 12
+459 val_459 2010-03-23 12
+460 val_460 2010-03-23 12
+460 val_460 2010-03-23 12
+462 val_462 2010-03-23 12
+462 val_462 2010-03-23 12
+462 val_462 2010-03-23 12
+462 val_462 2010-03-23 12
+463 val_463 2010-03-23 12
+463 val_463 2010-03-23 12
+463 val_463 2010-03-23 12
+463 val_463 2010-03-23 12
+466 val_466 2010-03-23 12
+466 val_466 2010-03-23 12
+466 val_466 2010-03-23 12
+466 val_466 2010-03-23 12
+466 val_466 2010-03-23 12
+466 val_466 2010-03-23 12
+467 val_467 2010-03-23 12
+467 val_467 2010-03-23 12
+468 val_468 2010-03-23 12
+468 val_468 2010-03-23 12
+468 val_468 2010-03-23 12
+468 val_468 2010-03-23 12
+468 val_468 2010-03-23 12
+468 val_468 2010-03-23 12
+468 val_468 2010-03-23 12
+468 val_468 2010-03-23 12
+469 val_469 2010-03-23 12
+469 val_469 2010-03-23 12
+469 val_469 2010-03-23 12
+469 val_469 2010-03-23 12
+469 val_469 2010-03-23 12
+469 val_469 2010-03-23 12
+469 val_469 2010-03-23 12
+469 val_469 2010-03-23 12
+469 val_469 2010-03-23 12
+469 val_469 2010-03-23 12
+47 val_47 2010-03-23 12
+47 val_47 2010-03-23 12
+470 val_470 2010-03-23 12
+470 val_470 2010-03-23 12
+472 val_472 2010-03-23 12
+472 val_472 2010-03-23 12
+475 val_475 2010-03-23 12
+475 val_475 2010-03-23 12
+477 val_477 2010-03-23 12
+477 val_477 2010-03-23 12
+478 val_478 2010-03-23 12
+478 val_478 2010-03-23 12
+478 val_478 2010-03-23 12
+478 val_478 2010-03-23 12
+479 val_479 2010-03-23 12
+479 val_479 2010-03-23 12
+480 val_480 2010-03-23 12
+480 val_480 2010-03-23 12
+480 val_480 2010-03-23 12
+480 val_480 2010-03-23 12
+480 val_480 2010-03-23 12
+480 val_480 2010-03-23 12
+481 val_481 2010-03-23 12
+481 val_481 2010-03-23 12
+482 val_482 2010-03-23 12
+482 val_482 2010-03-23 12
+483 val_483 2010-03-23 12
+483 val_483 2010-03-23 12
+484 val_484 2010-03-23 12
+484 val_484 2010-03-23 12
+485 val_485 2010-03-23 12
+485 val_485 2010-03-23 12
+487 val_487 2010-03-23 12
+487 val_487 2010-03-23 12
+489 val_489 2010-03-23 12
+489 val_489 2010-03-23 12
+489 val_489 2010-03-23 12
+489 val_489 2010-03-23 12
+489 val_489 2010-03-23 12
+489 val_489 2010-03-23 12
+489 val_489 2010-03-23 12
+489 val_489 2010-03-23 12
+490 val_490 2010-03-23 12
+490 val_490 2010-03-23 12
+491 val_491 2010-03-23 12
+491 val_491 2010-03-23 12
+492 val_492 2010-03-23 12
+492 val_492 2010-03-23 12
+492 val_492 2010-03-23 12
+492 val_492 2010-03-23 12
+493 val_493 2010-03-23 12
+493 val_493 2010-03-23 12
+494 val_494 2010-03-23 12
+494 val_494 2010-03-23 12
+495 val_495 2010-03-23 12
+495 val_495 2010-03-23 12
+496 val_496 2010-03-23 12
+496 val_496 2010-03-23 12
+497 val_497 2010-03-23 12
+497 val_497 2010-03-23 12
+498 val_498 2010-03-23 12
+498 val_498 2010-03-23 12
+498 val_498 2010-03-23 12
+498 val_498 2010-03-23 12
+498 val_498 2010-03-23 12
+498 val_498 2010-03-23 12
+5 val_5 2010-03-23 12
+5 val_5 2010-03-23 12
+5 val_5 2010-03-23 12
+5 val_5 2010-03-23 12
+5 val_5 2010-03-23 12
+5 val_5 2010-03-23 12
+51 val_51 2010-03-23 12
+51 val_51 2010-03-23 12
+51 val_51 2010-03-23 12
+51 val_51 2010-03-23 12
+53 val_53 2010-03-23 12
+53 val_53 2010-03-23 12
+54 val_54 2010-03-23 12
+54 val_54 2010-03-23 12
+57 val_57 2010-03-23 12
+57 val_57 2010-03-23 12
+58 val_58 2010-03-23 12
+58 val_58 2010-03-23 12
+58 val_58 2010-03-23 12
+58 val_58 2010-03-23 12
+64 val_64 2010-03-23 12
+64 val_64 2010-03-23 12
+65 val_65 2010-03-23 12
+65 val_65 2010-03-23 12
+66 val_66 2010-03-23 12
+66 val_66 2010-03-23 12
+67 val_67 2010-03-23 12
+67 val_67 2010-03-23 12
+67 val_67 2010-03-23 12
+67 val_67 2010-03-23 12
+69 val_69 2010-03-23 12
+69 val_69 2010-03-23 12
+70 val_70 2010-03-23 12
+70 val_70 2010-03-23 12
+70 val_70 2010-03-23 12
+70 val_70 2010-03-23 12
+70 val_70 2010-03-23 12
+70 val_70 2010-03-23 12
+72 val_72 2010-03-23 12
+72 val_72 2010-03-23 12
+72 val_72 2010-03-23 12
+72 val_72 2010-03-23 12
+74 val_74 2010-03-23 12
+74 val_74 2010-03-23 12
+76 val_76 2010-03-23 12
+76 val_76 2010-03-23 12
+76 val_76 2010-03-23 12
+76 val_76 2010-03-23 12
+77 val_77 2010-03-23 12
+77 val_77 2010-03-23 12
+78 val_78 2010-03-23 12
+78 val_78 2010-03-23 12
+8 val_8 2010-03-23 12
+8 val_8 2010-03-23 12
+80 val_80 2010-03-23 12
+80 val_80 2010-03-23 12
+82 val_82 2010-03-23 12
+82 val_82 2010-03-23 12
+83 val_83 2010-03-23 12
+83 val_83 2010-03-23 12
+83 val_83 2010-03-23 12
+83 val_83 2010-03-23 12
+84 val_84 2010-03-23 12
+84 val_84 2010-03-23 12
+84 val_84 2010-03-23 12
+84 val_84 2010-03-23 12
+85 val_85 2010-03-23 12
+85 val_85 2010-03-23 12
+86 val_86 2010-03-23 12
+86 val_86 2010-03-23 12
+87 val_87 2010-03-23 12
+87 val_87 2010-03-23 12
+9 val_9 2010-03-23 12
+9 val_9 2010-03-23 12
+90 val_90 2010-03-23 12
+90 val_90 2010-03-23 12
+90 val_90 2010-03-23 12
+90 val_90 2010-03-23 12
+90 val_90 2010-03-23 12
+90 val_90 2010-03-23 12
+92 val_92 2010-03-23 12
+92 val_92 2010-03-23 12
+95 val_95 2010-03-23 12
+95 val_95 2010-03-23 12
+95 val_95 2010-03-23 12
+95 val_95 2010-03-23 12
+96 val_96 2010-03-23 12
+96 val_96 2010-03-23 12
+97 val_97 2010-03-23 12
+97 val_97 2010-03-23 12
+97 val_97 2010-03-23 12
+97 val_97 2010-03-23 12
+98 val_98 2010-03-23 12
+98 val_98 2010-03-23 12
+98 val_98 2010-03-23 12
+98 val_98 2010-03-23 12
[12/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/merge2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/merge2.q.out b/ql/src/test/results/clientpositive/llap/merge2.q.out
new file mode 100644
index 0000000..e515db1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/merge2.q.out
@@ -0,0 +1,596 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table test1(key int, val int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table test1(key int, val int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test1
+PREHOOK: query: explain
+insert overwrite table test1
+select key, count(1) from src group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table test1
+select key, count(1) from src group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.test1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.test1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table test1
+select key, count(1) from src group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test1
+POSTHOOK: query: insert overwrite table test1
+select key, count(1) from src group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test1
+POSTHOOK: Lineage: test1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test1.val EXPRESSION [(src)src.null, ]
+PREHOOK: query: select * from test1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test1
+#### A masked pattern was here ####
+0 3
+10 1
+100 2
+103 2
+104 2
+105 1
+11 1
+111 1
+113 2
+114 1
+116 1
+118 2
+119 3
+12 2
+120 2
+125 2
+126 1
+128 3
+129 2
+131 1
+133 1
+134 2
+136 1
+137 2
+138 4
+143 1
+145 1
+146 2
+149 2
+15 2
+150 1
+152 2
+153 1
+155 1
+156 1
+157 1
+158 1
+160 1
+162 1
+163 1
+164 2
+165 2
+166 1
+167 3
+168 1
+169 4
+17 1
+170 1
+172 2
+174 2
+175 2
+176 2
+177 1
+178 1
+179 2
+18 2
+180 1
+181 1
+183 1
+186 1
+187 3
+189 1
+19 1
+190 1
+191 2
+192 1
+193 3
+194 1
+195 2
+196 1
+197 2
+199 3
+2 1
+20 1
+200 2
+201 1
+202 1
+203 2
+205 2
+207 2
+208 3
+209 2
+213 2
+214 1
+216 2
+217 2
+218 1
+219 2
+221 2
+222 1
+223 2
+224 2
+226 1
+228 1
+229 2
+230 5
+233 2
+235 1
+237 2
+238 2
+239 2
+24 2
+241 1
+242 2
+244 1
+247 1
+248 1
+249 1
+252 1
+255 2
+256 2
+257 1
+258 1
+26 2
+260 1
+262 1
+263 1
+265 2
+266 1
+27 1
+272 2
+273 3
+274 1
+275 1
+277 4
+278 2
+28 1
+280 2
+281 2
+282 2
+283 1
+284 1
+285 1
+286 1
+287 1
+288 2
+289 1
+291 1
+292 1
+296 1
+298 3
+30 1
+302 1
+305 1
+306 1
+307 2
+308 1
+309 2
+310 1
+311 3
+315 1
+316 3
+317 2
+318 3
+321 2
+322 2
+323 1
+325 2
+327 3
+33 1
+331 2
+332 1
+333 2
+335 1
+336 1
+338 1
+339 1
+34 1
+341 1
+342 2
+344 2
+345 1
+348 5
+35 3
+351 1
+353 2
+356 1
+360 1
+362 1
+364 1
+365 1
+366 1
+367 2
+368 1
+369 3
+37 2
+373 1
+374 1
+375 1
+377 1
+378 1
+379 1
+382 2
+384 3
+386 1
+389 1
+392 1
+393 1
+394 1
+395 2
+396 3
+397 2
+399 2
+4 1
+400 1
+401 5
+402 1
+403 3
+404 2
+406 4
+407 1
+409 3
+41 1
+411 1
+413 2
+414 2
+417 3
+418 1
+419 1
+42 2
+421 1
+424 2
+427 1
+429 2
+43 1
+430 3
+431 3
+432 1
+435 1
+436 1
+437 1
+438 3
+439 2
+44 1
+443 1
+444 1
+446 1
+448 1
+449 1
+452 1
+453 1
+454 3
+455 1
+457 1
+458 2
+459 2
+460 1
+462 2
+463 2
+466 3
+467 1
+468 4
+469 5
+47 1
+470 1
+472 1
+475 1
+477 1
+478 2
+479 1
+480 3
+481 1
+482 1
+483 1
+484 1
+485 1
+487 1
+489 4
+490 1
+491 1
+492 2
+493 1
+494 1
+495 1
+496 1
+497 1
+498 3
+5 3
+51 2
+53 1
+54 1
+57 1
+58 2
+64 1
+65 1
+66 1
+67 2
+69 1
+70 3
+72 2
+74 1
+76 2
+77 1
+78 1
+8 1
+80 1
+82 1
+83 2
+84 2
+85 1
+86 1
+87 1
+9 1
+90 3
+92 1
+95 2
+96 1
+97 2
+98 2
+PREHOOK: query: drop table test1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test1
+PREHOOK: Output: default@test1
+POSTHOOK: query: drop table test1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test1
+POSTHOOK: Output: default@test1
+PREHOOK: query: create table test_src(key string, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_src
+POSTHOOK: query: create table test_src(key string, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_src
+PREHOOK: query: create table test1(key string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test1
+POSTHOOK: query: create table test1(key string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test1
+PREHOOK: query: insert overwrite table test_src partition(ds='101') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_src@ds=101
+POSTHOOK: query: insert overwrite table test_src partition(ds='101') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_src@ds=101
+POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table test_src partition(ds='102') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_src@ds=102
+POSTHOOK: query: insert overwrite table test_src partition(ds='102') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_src@ds=102
+POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+insert overwrite table test1 select key from test_src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table test1 select key from test_src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: test_src
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.test1
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.test1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table test1 select key from test_src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_src
+PREHOOK: Input: default@test_src@ds=101
+PREHOOK: Input: default@test_src@ds=102
+PREHOOK: Output: default@test1
+POSTHOOK: query: insert overwrite table test1 select key from test_src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_src
+POSTHOOK: Input: default@test_src@ds=101
+POSTHOOK: Input: default@test_src@ds=102
+POSTHOOK: Output: default@test1
+POSTHOOK: Lineage: test1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: explain
+insert overwrite table test1 select key from test_src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table test1 select key from test_src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: test_src
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.test1
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.test1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table test1 select key from test_src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_src
+PREHOOK: Input: default@test_src@ds=101
+PREHOOK: Input: default@test_src@ds=102
+PREHOOK: Output: default@test1
+POSTHOOK: query: insert overwrite table test1 select key from test_src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_src
+POSTHOOK: Input: default@test_src@ds=101
+POSTHOOK: Input: default@test_src@ds=102
+POSTHOOK: Output: default@test1
+POSTHOOK: Lineage: test1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ]
[13/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/mapjoin_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/mapjoin_mapjoin.q.out
new file mode 100644
index 0000000..17be306
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/mapjoin_mapjoin.q.out
@@ -0,0 +1,825 @@
+PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin
+
+-- SORT_QUERY_RESULTS
+
+explain extended select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin
+
+-- SORT_QUERY_RESULTS
+
+explain extended select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key)
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ srcpart
+ TOK_TABREF
+ TOK_TABNAME
+ src
+ =
+ .
+ TOK_TABLE_OR_COL
+ srcpart
+ value
+ .
+ TOK_TABLE_OR_COL
+ src
+ value
+ TOK_TABREF
+ TOK_TABNAME
+ src1
+ =
+ .
+ TOK_TABLE_OR_COL
+ srcpart
+ key
+ .
+ TOK_TABLE_OR_COL
+ src1
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ .
+ TOK_TABLE_OR_COL
+ srcpart
+ key
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE), Map 3 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: (key is not null and value is not null) (type: boolean)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 2 => 13
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 1 Map 2
+ Position of Big Table: 0
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 250
+ keys:
+ 0 _col1 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=11
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ hr 11
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=12
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ hr 12
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=11
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ hr 11
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=12
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ hr 12
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+ Truncated Path -> Alias:
+ /srcpart/ds=2008-04-08/hr=11 [srcpart]
+ /srcpart/ds=2008-04-08/hr=12 [srcpart]
+ /srcpart/ds=2008-04-09/hr=11 [srcpart]
+ /srcpart/ds=2008-04-09/hr=12 [srcpart]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: src1
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src1
+ numFiles 1
+ numRows 25
+ rawDataSize 191
+ serialization.ddl struct src1 { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 216
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src1
+ numFiles 1
+ numRows 25
+ rawDataSize 191
+ serialization.ddl struct src1 { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 216
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src1
+ name: default.src1
+ Truncated Path -> Alias:
+ /src1 [src1]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: src
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src
+ name: default.src
+ Truncated Path -> Alias:
+ /src [src]
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE), Map 3 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((value > 'val_450') and key is not null) and value is not null) (type: boolean)
+ Statistics: Num rows: 167 Data size: 1774 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 167 Data size: 1774 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 183 Data size: 1951 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col1 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 201 Data size: 2146 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 201 Data size: 2146 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: src1
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (value > 'val_450') (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: explain
+select count(*) from srcpart join src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from srcpart join src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (value is not null and key is not null) (type: boolean)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), ds (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col1 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col2
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col2
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col2 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: value (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: bigint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+66
+66
+66
+66
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+PREHOOK: query: select count(*) from srcpart join src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+5308
+5308
+PREHOOK: query: select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+66
+66
+66
+66
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+98
+PREHOOK: query: select count(*) from srcpart join src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+5308
+5308
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/mapreduce1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mapreduce1.q.out b/ql/src/test/results/clientpositive/llap/mapreduce1.q.out
new file mode 100644
index 0000000..6eb5ecc
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/mapreduce1.q.out
@@ -0,0 +1,621 @@
+PREHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1
+MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value
+USING 'cat' AS (tkey, ten, one, tvalue)
+DISTRIBUTE BY tvalue, tkey
+SORT BY ten, one
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1
+MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value
+USING 'cat' AS (tkey, ten, one, tvalue)
+DISTRIBUTE BY tvalue, tkey
+SORT BY ten, one
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), UDFToInteger((key / 10)) (type: int), UDFToInteger((key % 10)) (type: int), value (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Transform Operator
+ command: cat
+ output info:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string), _col2 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col3 (type: string), _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: UDFToInteger(VALUE._col0) (type: int), UDFToInteger(VALUE._col1) (type: int), UDFToInteger(VALUE._col2) (type: int), VALUE._col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1
+MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value
+USING 'cat' AS (tkey, ten, one, tvalue)
+DISTRIBUTE BY tvalue, tkey
+SORT BY ten, one
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1
+MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value
+USING 'cat' AS (tkey, ten, one, tvalue)
+DISTRIBUTE BY tvalue, tkey
+SORT BY ten, one
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT dest1.* FROM dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT dest1.* FROM dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+0 0 0 val_0
+0 0 0 val_0
+0 0 0 val_0
+2 0 2 val_2
+4 0 4 val_4
+5 0 5 val_5
+5 0 5 val_5
+5 0 5 val_5
+8 0 8 val_8
+9 0 9 val_9
+10 1 0 val_10
+11 1 1 val_11
+12 1 2 val_12
+12 1 2 val_12
+15 1 5 val_15
+15 1 5 val_15
+17 1 7 val_17
+18 1 8 val_18
+18 1 8 val_18
+19 1 9 val_19
+100 10 0 val_100
+100 10 0 val_100
+103 10 3 val_103
+103 10 3 val_103
+104 10 4 val_104
+104 10 4 val_104
+105 10 5 val_105
+111 11 1 val_111
+113 11 3 val_113
+113 11 3 val_113
+114 11 4 val_114
+116 11 6 val_116
+118 11 8 val_118
+118 11 8 val_118
+119 11 9 val_119
+119 11 9 val_119
+119 11 9 val_119
+120 12 0 val_120
+120 12 0 val_120
+125 12 5 val_125
+125 12 5 val_125
+126 12 6 val_126
+128 12 8 val_128
+128 12 8 val_128
+128 12 8 val_128
+129 12 9 val_129
+129 12 9 val_129
+131 13 1 val_131
+133 13 3 val_133
+134 13 4 val_134
+134 13 4 val_134
+136 13 6 val_136
+137 13 7 val_137
+137 13 7 val_137
+138 13 8 val_138
+138 13 8 val_138
+138 13 8 val_138
+138 13 8 val_138
+143 14 3 val_143
+145 14 5 val_145
+146 14 6 val_146
+146 14 6 val_146
+149 14 9 val_149
+149 14 9 val_149
+150 15 0 val_150
+152 15 2 val_152
+152 15 2 val_152
+153 15 3 val_153
+155 15 5 val_155
+156 15 6 val_156
+157 15 7 val_157
+158 15 8 val_158
+160 16 0 val_160
+162 16 2 val_162
+163 16 3 val_163
+164 16 4 val_164
+164 16 4 val_164
+165 16 5 val_165
+165 16 5 val_165
+166 16 6 val_166
+167 16 7 val_167
+167 16 7 val_167
+167 16 7 val_167
+168 16 8 val_168
+169 16 9 val_169
+169 16 9 val_169
+169 16 9 val_169
+169 16 9 val_169
+170 17 0 val_170
+172 17 2 val_172
+172 17 2 val_172
+174 17 4 val_174
+174 17 4 val_174
+175 17 5 val_175
+175 17 5 val_175
+176 17 6 val_176
+176 17 6 val_176
+177 17 7 val_177
+178 17 8 val_178
+179 17 9 val_179
+179 17 9 val_179
+180 18 0 val_180
+181 18 1 val_181
+183 18 3 val_183
+186 18 6 val_186
+187 18 7 val_187
+187 18 7 val_187
+187 18 7 val_187
+189 18 9 val_189
+190 19 0 val_190
+191 19 1 val_191
+191 19 1 val_191
+192 19 2 val_192
+193 19 3 val_193
+193 19 3 val_193
+193 19 3 val_193
+194 19 4 val_194
+195 19 5 val_195
+195 19 5 val_195
+196 19 6 val_196
+197 19 7 val_197
+197 19 7 val_197
+199 19 9 val_199
+199 19 9 val_199
+199 19 9 val_199
+20 2 0 val_20
+24 2 4 val_24
+24 2 4 val_24
+26 2 6 val_26
+26 2 6 val_26
+27 2 7 val_27
+28 2 8 val_28
+200 20 0 val_200
+200 20 0 val_200
+201 20 1 val_201
+202 20 2 val_202
+203 20 3 val_203
+203 20 3 val_203
+205 20 5 val_205
+205 20 5 val_205
+207 20 7 val_207
+207 20 7 val_207
+208 20 8 val_208
+208 20 8 val_208
+208 20 8 val_208
+209 20 9 val_209
+209 20 9 val_209
+213 21 3 val_213
+213 21 3 val_213
+214 21 4 val_214
+216 21 6 val_216
+216 21 6 val_216
+217 21 7 val_217
+217 21 7 val_217
+218 21 8 val_218
+219 21 9 val_219
+219 21 9 val_219
+221 22 1 val_221
+221 22 1 val_221
+222 22 2 val_222
+223 22 3 val_223
+223 22 3 val_223
+224 22 4 val_224
+224 22 4 val_224
+226 22 6 val_226
+228 22 8 val_228
+229 22 9 val_229
+229 22 9 val_229
+230 23 0 val_230
+230 23 0 val_230
+230 23 0 val_230
+230 23 0 val_230
+230 23 0 val_230
+233 23 3 val_233
+233 23 3 val_233
+235 23 5 val_235
+237 23 7 val_237
+237 23 7 val_237
+238 23 8 val_238
+238 23 8 val_238
+239 23 9 val_239
+239 23 9 val_239
+241 24 1 val_241
+242 24 2 val_242
+242 24 2 val_242
+244 24 4 val_244
+247 24 7 val_247
+248 24 8 val_248
+249 24 9 val_249
+252 25 2 val_252
+255 25 5 val_255
+255 25 5 val_255
+256 25 6 val_256
+256 25 6 val_256
+257 25 7 val_257
+258 25 8 val_258
+260 26 0 val_260
+262 26 2 val_262
+263 26 3 val_263
+265 26 5 val_265
+265 26 5 val_265
+266 26 6 val_266
+272 27 2 val_272
+272 27 2 val_272
+273 27 3 val_273
+273 27 3 val_273
+273 27 3 val_273
+274 27 4 val_274
+275 27 5 val_275
+277 27 7 val_277
+277 27 7 val_277
+277 27 7 val_277
+277 27 7 val_277
+278 27 8 val_278
+278 27 8 val_278
+280 28 0 val_280
+280 28 0 val_280
+281 28 1 val_281
+281 28 1 val_281
+282 28 2 val_282
+282 28 2 val_282
+283 28 3 val_283
+284 28 4 val_284
+285 28 5 val_285
+286 28 6 val_286
+287 28 7 val_287
+288 28 8 val_288
+288 28 8 val_288
+289 28 9 val_289
+291 29 1 val_291
+292 29 2 val_292
+296 29 6 val_296
+298 29 8 val_298
+298 29 8 val_298
+298 29 8 val_298
+30 3 0 val_30
+33 3 3 val_33
+34 3 4 val_34
+35 3 5 val_35
+35 3 5 val_35
+35 3 5 val_35
+37 3 7 val_37
+37 3 7 val_37
+302 30 2 val_302
+305 30 5 val_305
+306 30 6 val_306
+307 30 7 val_307
+307 30 7 val_307
+308 30 8 val_308
+309 30 9 val_309
+309 30 9 val_309
+310 31 0 val_310
+311 31 1 val_311
+311 31 1 val_311
+311 31 1 val_311
+315 31 5 val_315
+316 31 6 val_316
+316 31 6 val_316
+316 31 6 val_316
+317 31 7 val_317
+317 31 7 val_317
+318 31 8 val_318
+318 31 8 val_318
+318 31 8 val_318
+321 32 1 val_321
+321 32 1 val_321
+322 32 2 val_322
+322 32 2 val_322
+323 32 3 val_323
+325 32 5 val_325
+325 32 5 val_325
+327 32 7 val_327
+327 32 7 val_327
+327 32 7 val_327
+331 33 1 val_331
+331 33 1 val_331
+332 33 2 val_332
+333 33 3 val_333
+333 33 3 val_333
+335 33 5 val_335
+336 33 6 val_336
+338 33 8 val_338
+339 33 9 val_339
+341 34 1 val_341
+342 34 2 val_342
+342 34 2 val_342
+344 34 4 val_344
+344 34 4 val_344
+345 34 5 val_345
+348 34 8 val_348
+348 34 8 val_348
+348 34 8 val_348
+348 34 8 val_348
+348 34 8 val_348
+351 35 1 val_351
+353 35 3 val_353
+353 35 3 val_353
+356 35 6 val_356
+360 36 0 val_360
+362 36 2 val_362
+364 36 4 val_364
+365 36 5 val_365
+366 36 6 val_366
+367 36 7 val_367
+367 36 7 val_367
+368 36 8 val_368
+369 36 9 val_369
+369 36 9 val_369
+369 36 9 val_369
+373 37 3 val_373
+374 37 4 val_374
+375 37 5 val_375
+377 37 7 val_377
+378 37 8 val_378
+379 37 9 val_379
+382 38 2 val_382
+382 38 2 val_382
+384 38 4 val_384
+384 38 4 val_384
+384 38 4 val_384
+386 38 6 val_386
+389 38 9 val_389
+392 39 2 val_392
+393 39 3 val_393
+394 39 4 val_394
+395 39 5 val_395
+395 39 5 val_395
+396 39 6 val_396
+396 39 6 val_396
+396 39 6 val_396
+397 39 7 val_397
+397 39 7 val_397
+399 39 9 val_399
+399 39 9 val_399
+41 4 1 val_41
+42 4 2 val_42
+42 4 2 val_42
+43 4 3 val_43
+44 4 4 val_44
+47 4 7 val_47
+400 40 0 val_400
+401 40 1 val_401
+401 40 1 val_401
+401 40 1 val_401
+401 40 1 val_401
+401 40 1 val_401
+402 40 2 val_402
+403 40 3 val_403
+403 40 3 val_403
+403 40 3 val_403
+404 40 4 val_404
+404 40 4 val_404
+406 40 6 val_406
+406 40 6 val_406
+406 40 6 val_406
+406 40 6 val_406
+407 40 7 val_407
+409 40 9 val_409
+409 40 9 val_409
+409 40 9 val_409
+411 41 1 val_411
+413 41 3 val_413
+413 41 3 val_413
+414 41 4 val_414
+414 41 4 val_414
+417 41 7 val_417
+417 41 7 val_417
+417 41 7 val_417
+418 41 8 val_418
+419 41 9 val_419
+421 42 1 val_421
+424 42 4 val_424
+424 42 4 val_424
+427 42 7 val_427
+429 42 9 val_429
+429 42 9 val_429
+430 43 0 val_430
+430 43 0 val_430
+430 43 0 val_430
+431 43 1 val_431
+431 43 1 val_431
+431 43 1 val_431
+432 43 2 val_432
+435 43 5 val_435
+436 43 6 val_436
+437 43 7 val_437
+438 43 8 val_438
+438 43 8 val_438
+438 43 8 val_438
+439 43 9 val_439
+439 43 9 val_439
+443 44 3 val_443
+444 44 4 val_444
+446 44 6 val_446
+448 44 8 val_448
+449 44 9 val_449
+452 45 2 val_452
+453 45 3 val_453
+454 45 4 val_454
+454 45 4 val_454
+454 45 4 val_454
+455 45 5 val_455
+457 45 7 val_457
+458 45 8 val_458
+458 45 8 val_458
+459 45 9 val_459
+459 45 9 val_459
+460 46 0 val_460
+462 46 2 val_462
+462 46 2 val_462
+463 46 3 val_463
+463 46 3 val_463
+466 46 6 val_466
+466 46 6 val_466
+466 46 6 val_466
+467 46 7 val_467
+468 46 8 val_468
+468 46 8 val_468
+468 46 8 val_468
+468 46 8 val_468
+469 46 9 val_469
+469 46 9 val_469
+469 46 9 val_469
+469 46 9 val_469
+469 46 9 val_469
+470 47 0 val_470
+472 47 2 val_472
+475 47 5 val_475
+477 47 7 val_477
+478 47 8 val_478
+478 47 8 val_478
+479 47 9 val_479
+480 48 0 val_480
+480 48 0 val_480
+480 48 0 val_480
+481 48 1 val_481
+482 48 2 val_482
+483 48 3 val_483
+484 48 4 val_484
+485 48 5 val_485
+487 48 7 val_487
+489 48 9 val_489
+489 48 9 val_489
+489 48 9 val_489
+489 48 9 val_489
+490 49 0 val_490
+491 49 1 val_491
+492 49 2 val_492
+492 49 2 val_492
+493 49 3 val_493
+494 49 4 val_494
+495 49 5 val_495
+496 49 6 val_496
+497 49 7 val_497
+498 49 8 val_498
+498 49 8 val_498
+498 49 8 val_498
+51 5 1 val_51
+51 5 1 val_51
+53 5 3 val_53
+54 5 4 val_54
+57 5 7 val_57
+58 5 8 val_58
+58 5 8 val_58
+64 6 4 val_64
+65 6 5 val_65
+66 6 6 val_66
+67 6 7 val_67
+67 6 7 val_67
+69 6 9 val_69
+70 7 0 val_70
+70 7 0 val_70
+70 7 0 val_70
+72 7 2 val_72
+72 7 2 val_72
+74 7 4 val_74
+76 7 6 val_76
+76 7 6 val_76
+77 7 7 val_77
+78 7 8 val_78
+80 8 0 val_80
+82 8 2 val_82
+83 8 3 val_83
+83 8 3 val_83
+84 8 4 val_84
+84 8 4 val_84
+85 8 5 val_85
+86 8 6 val_86
+87 8 7 val_87
+90 9 0 val_90
+90 9 0 val_90
+90 9 0 val_90
+92 9 2 val_92
+95 9 5 val_95
+95 9 5 val_95
+96 9 6 val_96
+97 9 7 val_97
+97 9 7 val_97
+98 9 8 val_98
+98 9 8 val_98
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/mapreduce2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mapreduce2.q.out b/ql/src/test/results/clientpositive/llap/mapreduce2.q.out
new file mode 100644
index 0000000..2b18150
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/mapreduce2.q.out
@@ -0,0 +1,616 @@
+PREHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1
+MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value
+USING 'cat' AS (tkey, ten, one, tvalue)
+DISTRIBUTE BY tvalue, tkey
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1
+MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value
+USING 'cat' AS (tkey, ten, one, tvalue)
+DISTRIBUTE BY tvalue, tkey
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), UDFToInteger((key / 10)) (type: int), UDFToInteger((key % 10)) (type: int), value (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Transform Operator
+ command: cat
+ output info:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Map-reduce partition columns: _col3 (type: string), _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: UDFToInteger(VALUE._col0) (type: int), UDFToInteger(VALUE._col1) (type: int), UDFToInteger(VALUE._col2) (type: int), VALUE._col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1
+MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value
+USING 'cat' AS (tkey, ten, one, tvalue)
+DISTRIBUTE BY tvalue, tkey
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE dest1
+MAP src.key, CAST(src.key / 10 AS INT), CAST(src.key % 10 AS INT), src.value
+USING 'cat' AS (tkey, ten, one, tvalue)
+DISTRIBUTE BY tvalue, tkey
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.one SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.ten SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM (SELECT dest1.* FROM dest1 DISTRIBUTE BY key SORT BY key, ten, one, value) T ORDER BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM (SELECT dest1.* FROM dest1 DISTRIBUTE BY key SORT BY key, ten, one, value) T ORDER BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+0 0 0 val_0
+0 0 0 val_0
+0 0 0 val_0
+2 0 2 val_2
+4 0 4 val_4
+5 0 5 val_5
+5 0 5 val_5
+5 0 5 val_5
+8 0 8 val_8
+9 0 9 val_9
+10 1 0 val_10
+11 1 1 val_11
+12 1 2 val_12
+12 1 2 val_12
+15 1 5 val_15
+15 1 5 val_15
+17 1 7 val_17
+18 1 8 val_18
+18 1 8 val_18
+19 1 9 val_19
+20 2 0 val_20
+24 2 4 val_24
+24 2 4 val_24
+26 2 6 val_26
+26 2 6 val_26
+27 2 7 val_27
+28 2 8 val_28
+30 3 0 val_30
+33 3 3 val_33
+34 3 4 val_34
+35 3 5 val_35
+35 3 5 val_35
+35 3 5 val_35
+37 3 7 val_37
+37 3 7 val_37
+41 4 1 val_41
+42 4 2 val_42
+42 4 2 val_42
+43 4 3 val_43
+44 4 4 val_44
+47 4 7 val_47
+51 5 1 val_51
+51 5 1 val_51
+53 5 3 val_53
+54 5 4 val_54
+57 5 7 val_57
+58 5 8 val_58
+58 5 8 val_58
+64 6 4 val_64
+65 6 5 val_65
+66 6 6 val_66
+67 6 7 val_67
+67 6 7 val_67
+69 6 9 val_69
+70 7 0 val_70
+70 7 0 val_70
+70 7 0 val_70
+72 7 2 val_72
+72 7 2 val_72
+74 7 4 val_74
+76 7 6 val_76
+76 7 6 val_76
+77 7 7 val_77
+78 7 8 val_78
+80 8 0 val_80
+82 8 2 val_82
+83 8 3 val_83
+83 8 3 val_83
+84 8 4 val_84
+84 8 4 val_84
+85 8 5 val_85
+86 8 6 val_86
+87 8 7 val_87
+90 9 0 val_90
+90 9 0 val_90
+90 9 0 val_90
+92 9 2 val_92
+95 9 5 val_95
+95 9 5 val_95
+96 9 6 val_96
+97 9 7 val_97
+97 9 7 val_97
+98 9 8 val_98
+98 9 8 val_98
+100 10 0 val_100
+100 10 0 val_100
+103 10 3 val_103
+103 10 3 val_103
+104 10 4 val_104
+104 10 4 val_104
+105 10 5 val_105
+111 11 1 val_111
+113 11 3 val_113
+113 11 3 val_113
+114 11 4 val_114
+116 11 6 val_116
+118 11 8 val_118
+118 11 8 val_118
+119 11 9 val_119
+119 11 9 val_119
+119 11 9 val_119
+120 12 0 val_120
+120 12 0 val_120
+125 12 5 val_125
+125 12 5 val_125
+126 12 6 val_126
+128 12 8 val_128
+128 12 8 val_128
+128 12 8 val_128
+129 12 9 val_129
+129 12 9 val_129
+131 13 1 val_131
+133 13 3 val_133
+134 13 4 val_134
+134 13 4 val_134
+136 13 6 val_136
+137 13 7 val_137
+137 13 7 val_137
+138 13 8 val_138
+138 13 8 val_138
+138 13 8 val_138
+138 13 8 val_138
+143 14 3 val_143
+145 14 5 val_145
+146 14 6 val_146
+146 14 6 val_146
+149 14 9 val_149
+149 14 9 val_149
+150 15 0 val_150
+152 15 2 val_152
+152 15 2 val_152
+153 15 3 val_153
+155 15 5 val_155
+156 15 6 val_156
+157 15 7 val_157
+158 15 8 val_158
+160 16 0 val_160
+162 16 2 val_162
+163 16 3 val_163
+164 16 4 val_164
+164 16 4 val_164
+165 16 5 val_165
+165 16 5 val_165
+166 16 6 val_166
+167 16 7 val_167
+167 16 7 val_167
+167 16 7 val_167
+168 16 8 val_168
+169 16 9 val_169
+169 16 9 val_169
+169 16 9 val_169
+169 16 9 val_169
+170 17 0 val_170
+172 17 2 val_172
+172 17 2 val_172
+174 17 4 val_174
+174 17 4 val_174
+175 17 5 val_175
+175 17 5 val_175
+176 17 6 val_176
+176 17 6 val_176
+177 17 7 val_177
+178 17 8 val_178
+179 17 9 val_179
+179 17 9 val_179
+180 18 0 val_180
+181 18 1 val_181
+183 18 3 val_183
+186 18 6 val_186
+187 18 7 val_187
+187 18 7 val_187
+187 18 7 val_187
+189 18 9 val_189
+190 19 0 val_190
+191 19 1 val_191
+191 19 1 val_191
+192 19 2 val_192
+193 19 3 val_193
+193 19 3 val_193
+193 19 3 val_193
+194 19 4 val_194
+195 19 5 val_195
+195 19 5 val_195
+196 19 6 val_196
+197 19 7 val_197
+197 19 7 val_197
+199 19 9 val_199
+199 19 9 val_199
+199 19 9 val_199
+200 20 0 val_200
+200 20 0 val_200
+201 20 1 val_201
+202 20 2 val_202
+203 20 3 val_203
+203 20 3 val_203
+205 20 5 val_205
+205 20 5 val_205
+207 20 7 val_207
+207 20 7 val_207
+208 20 8 val_208
+208 20 8 val_208
+208 20 8 val_208
+209 20 9 val_209
+209 20 9 val_209
+213 21 3 val_213
+213 21 3 val_213
+214 21 4 val_214
+216 21 6 val_216
+216 21 6 val_216
+217 21 7 val_217
+217 21 7 val_217
+218 21 8 val_218
+219 21 9 val_219
+219 21 9 val_219
+221 22 1 val_221
+221 22 1 val_221
+222 22 2 val_222
+223 22 3 val_223
+223 22 3 val_223
+224 22 4 val_224
+224 22 4 val_224
+226 22 6 val_226
+228 22 8 val_228
+229 22 9 val_229
+229 22 9 val_229
+230 23 0 val_230
+230 23 0 val_230
+230 23 0 val_230
+230 23 0 val_230
+230 23 0 val_230
+233 23 3 val_233
+233 23 3 val_233
+235 23 5 val_235
+237 23 7 val_237
+237 23 7 val_237
+238 23 8 val_238
+238 23 8 val_238
+239 23 9 val_239
+239 23 9 val_239
+241 24 1 val_241
+242 24 2 val_242
+242 24 2 val_242
+244 24 4 val_244
+247 24 7 val_247
+248 24 8 val_248
+249 24 9 val_249
+252 25 2 val_252
+255 25 5 val_255
+255 25 5 val_255
+256 25 6 val_256
+256 25 6 val_256
+257 25 7 val_257
+258 25 8 val_258
+260 26 0 val_260
+262 26 2 val_262
+263 26 3 val_263
+265 26 5 val_265
+265 26 5 val_265
+266 26 6 val_266
+272 27 2 val_272
+272 27 2 val_272
+273 27 3 val_273
+273 27 3 val_273
+273 27 3 val_273
+274 27 4 val_274
+275 27 5 val_275
+277 27 7 val_277
+277 27 7 val_277
+277 27 7 val_277
+277 27 7 val_277
+278 27 8 val_278
+278 27 8 val_278
+280 28 0 val_280
+280 28 0 val_280
+281 28 1 val_281
+281 28 1 val_281
+282 28 2 val_282
+282 28 2 val_282
+283 28 3 val_283
+284 28 4 val_284
+285 28 5 val_285
+286 28 6 val_286
+287 28 7 val_287
+288 28 8 val_288
+288 28 8 val_288
+289 28 9 val_289
+291 29 1 val_291
+292 29 2 val_292
+296 29 6 val_296
+298 29 8 val_298
+298 29 8 val_298
+298 29 8 val_298
+302 30 2 val_302
+305 30 5 val_305
+306 30 6 val_306
+307 30 7 val_307
+307 30 7 val_307
+308 30 8 val_308
+309 30 9 val_309
+309 30 9 val_309
+310 31 0 val_310
+311 31 1 val_311
+311 31 1 val_311
+311 31 1 val_311
+315 31 5 val_315
+316 31 6 val_316
+316 31 6 val_316
+316 31 6 val_316
+317 31 7 val_317
+317 31 7 val_317
+318 31 8 val_318
+318 31 8 val_318
+318 31 8 val_318
+321 32 1 val_321
+321 32 1 val_321
+322 32 2 val_322
+322 32 2 val_322
+323 32 3 val_323
+325 32 5 val_325
+325 32 5 val_325
+327 32 7 val_327
+327 32 7 val_327
+327 32 7 val_327
+331 33 1 val_331
+331 33 1 val_331
+332 33 2 val_332
+333 33 3 val_333
+333 33 3 val_333
+335 33 5 val_335
+336 33 6 val_336
+338 33 8 val_338
+339 33 9 val_339
+341 34 1 val_341
+342 34 2 val_342
+342 34 2 val_342
+344 34 4 val_344
+344 34 4 val_344
+345 34 5 val_345
+348 34 8 val_348
+348 34 8 val_348
+348 34 8 val_348
+348 34 8 val_348
+348 34 8 val_348
+351 35 1 val_351
+353 35 3 val_353
+353 35 3 val_353
+356 35 6 val_356
+360 36 0 val_360
+362 36 2 val_362
+364 36 4 val_364
+365 36 5 val_365
+366 36 6 val_366
+367 36 7 val_367
+367 36 7 val_367
+368 36 8 val_368
+369 36 9 val_369
+369 36 9 val_369
+369 36 9 val_369
+373 37 3 val_373
+374 37 4 val_374
+375 37 5 val_375
+377 37 7 val_377
+378 37 8 val_378
+379 37 9 val_379
+382 38 2 val_382
+382 38 2 val_382
+384 38 4 val_384
+384 38 4 val_384
+384 38 4 val_384
+386 38 6 val_386
+389 38 9 val_389
+392 39 2 val_392
+393 39 3 val_393
+394 39 4 val_394
+395 39 5 val_395
+395 39 5 val_395
+396 39 6 val_396
+396 39 6 val_396
+396 39 6 val_396
+397 39 7 val_397
+397 39 7 val_397
+399 39 9 val_399
+399 39 9 val_399
+400 40 0 val_400
+401 40 1 val_401
+401 40 1 val_401
+401 40 1 val_401
+401 40 1 val_401
+401 40 1 val_401
+402 40 2 val_402
+403 40 3 val_403
+403 40 3 val_403
+403 40 3 val_403
+404 40 4 val_404
+404 40 4 val_404
+406 40 6 val_406
+406 40 6 val_406
+406 40 6 val_406
+406 40 6 val_406
+407 40 7 val_407
+409 40 9 val_409
+409 40 9 val_409
+409 40 9 val_409
+411 41 1 val_411
+413 41 3 val_413
+413 41 3 val_413
+414 41 4 val_414
+414 41 4 val_414
+417 41 7 val_417
+417 41 7 val_417
+417 41 7 val_417
+418 41 8 val_418
+419 41 9 val_419
+421 42 1 val_421
+424 42 4 val_424
+424 42 4 val_424
+427 42 7 val_427
+429 42 9 val_429
+429 42 9 val_429
+430 43 0 val_430
+430 43 0 val_430
+430 43 0 val_430
+431 43 1 val_431
+431 43 1 val_431
+431 43 1 val_431
+432 43 2 val_432
+435 43 5 val_435
+436 43 6 val_436
+437 43 7 val_437
+438 43 8 val_438
+438 43 8 val_438
+438 43 8 val_438
+439 43 9 val_439
+439 43 9 val_439
+443 44 3 val_443
+444 44 4 val_444
+446 44 6 val_446
+448 44 8 val_448
+449 44 9 val_449
+452 45 2 val_452
+453 45 3 val_453
+454 45 4 val_454
+454 45 4 val_454
+454 45 4 val_454
+455 45 5 val_455
+457 45 7 val_457
+458 45 8 val_458
+458 45 8 val_458
+459 45 9 val_459
+459 45 9 val_459
+460 46 0 val_460
+462 46 2 val_462
+462 46 2 val_462
+463 46 3 val_463
+463 46 3 val_463
+466 46 6 val_466
+466 46 6 val_466
+466 46 6 val_466
+467 46 7 val_467
+468 46 8 val_468
+468 46 8 val_468
+468 46 8 val_468
+468 46 8 val_468
+469 46 9 val_469
+469 46 9 val_469
+469 46 9 val_469
+469 46 9 val_469
+469 46 9 val_469
+470 47 0 val_470
+472 47 2 val_472
+475 47 5 val_475
+477 47 7 val_477
+478 47 8 val_478
+478 47 8 val_478
+479 47 9 val_479
+480 48 0 val_480
+480 48 0 val_480
+480 48 0 val_480
+481 48 1 val_481
+482 48 2 val_482
+483 48 3 val_483
+484 48 4 val_484
+485 48 5 val_485
+487 48 7 val_487
+489 48 9 val_489
+489 48 9 val_489
+489 48 9 val_489
+489 48 9 val_489
+490 49 0 val_490
+491 49 1 val_491
+492 49 2 val_492
+492 49 2 val_492
+493 49 3 val_493
+494 49 4 val_494
+495 49 5 val_495
+496 49 6 val_496
+497 49 7 val_497
+498 49 8 val_498
+498 49 8 val_498
+498 49 8 val_498
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/merge1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/merge1.q.out b/ql/src/test/results/clientpositive/llap/merge1.q.out
new file mode 100644
index 0000000..ce3cd2c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/merge1.q.out
@@ -0,0 +1,596 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table dest1(key int, val int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table dest1(key int, val int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: explain
+insert overwrite table dest1
+select key, count(1) from src group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table dest1
+select key, count(1) from src group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table dest1
+select key, count(1) from src group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@dest1
+POSTHOOK: query: insert overwrite table dest1
+select key, count(1) from src group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.val EXPRESSION [(src)src.null, ]
+PREHOOK: query: select * from dest1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dest1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from dest1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dest1
+#### A masked pattern was here ####
+0 3
+10 1
+100 2
+103 2
+104 2
+105 1
+11 1
+111 1
+113 2
+114 1
+116 1
+118 2
+119 3
+12 2
+120 2
+125 2
+126 1
+128 3
+129 2
+131 1
+133 1
+134 2
+136 1
+137 2
+138 4
+143 1
+145 1
+146 2
+149 2
+15 2
+150 1
+152 2
+153 1
+155 1
+156 1
+157 1
+158 1
+160 1
+162 1
+163 1
+164 2
+165 2
+166 1
+167 3
+168 1
+169 4
+17 1
+170 1
+172 2
+174 2
+175 2
+176 2
+177 1
+178 1
+179 2
+18 2
+180 1
+181 1
+183 1
+186 1
+187 3
+189 1
+19 1
+190 1
+191 2
+192 1
+193 3
+194 1
+195 2
+196 1
+197 2
+199 3
+2 1
+20 1
+200 2
+201 1
+202 1
+203 2
+205 2
+207 2
+208 3
+209 2
+213 2
+214 1
+216 2
+217 2
+218 1
+219 2
+221 2
+222 1
+223 2
+224 2
+226 1
+228 1
+229 2
+230 5
+233 2
+235 1
+237 2
+238 2
+239 2
+24 2
+241 1
+242 2
+244 1
+247 1
+248 1
+249 1
+252 1
+255 2
+256 2
+257 1
+258 1
+26 2
+260 1
+262 1
+263 1
+265 2
+266 1
+27 1
+272 2
+273 3
+274 1
+275 1
+277 4
+278 2
+28 1
+280 2
+281 2
+282 2
+283 1
+284 1
+285 1
+286 1
+287 1
+288 2
+289 1
+291 1
+292 1
+296 1
+298 3
+30 1
+302 1
+305 1
+306 1
+307 2
+308 1
+309 2
+310 1
+311 3
+315 1
+316 3
+317 2
+318 3
+321 2
+322 2
+323 1
+325 2
+327 3
+33 1
+331 2
+332 1
+333 2
+335 1
+336 1
+338 1
+339 1
+34 1
+341 1
+342 2
+344 2
+345 1
+348 5
+35 3
+351 1
+353 2
+356 1
+360 1
+362 1
+364 1
+365 1
+366 1
+367 2
+368 1
+369 3
+37 2
+373 1
+374 1
+375 1
+377 1
+378 1
+379 1
+382 2
+384 3
+386 1
+389 1
+392 1
+393 1
+394 1
+395 2
+396 3
+397 2
+399 2
+4 1
+400 1
+401 5
+402 1
+403 3
+404 2
+406 4
+407 1
+409 3
+41 1
+411 1
+413 2
+414 2
+417 3
+418 1
+419 1
+42 2
+421 1
+424 2
+427 1
+429 2
+43 1
+430 3
+431 3
+432 1
+435 1
+436 1
+437 1
+438 3
+439 2
+44 1
+443 1
+444 1
+446 1
+448 1
+449 1
+452 1
+453 1
+454 3
+455 1
+457 1
+458 2
+459 2
+460 1
+462 2
+463 2
+466 3
+467 1
+468 4
+469 5
+47 1
+470 1
+472 1
+475 1
+477 1
+478 2
+479 1
+480 3
+481 1
+482 1
+483 1
+484 1
+485 1
+487 1
+489 4
+490 1
+491 1
+492 2
+493 1
+494 1
+495 1
+496 1
+497 1
+498 3
+5 3
+51 2
+53 1
+54 1
+57 1
+58 2
+64 1
+65 1
+66 1
+67 2
+69 1
+70 3
+72 2
+74 1
+76 2
+77 1
+78 1
+8 1
+80 1
+82 1
+83 2
+84 2
+85 1
+86 1
+87 1
+9 1
+90 3
+92 1
+95 2
+96 1
+97 2
+98 2
+PREHOOK: query: drop table dest1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@dest1
+PREHOOK: Output: default@dest1
+POSTHOOK: query: drop table dest1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@dest1
+POSTHOOK: Output: default@dest1
+PREHOOK: query: create table test_src(key string, value string) partitioned by (ds string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_src
+POSTHOOK: query: create table test_src(key string, value string) partitioned by (ds string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_src
+PREHOOK: query: create table dest1(key string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest1
+POSTHOOK: query: create table dest1(key string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@dest1
+PREHOOK: query: insert overwrite table test_src partition(ds='101') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_src@ds=101
+POSTHOOK: query: insert overwrite table test_src partition(ds='101') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_src@ds=101
+POSTHOOK: Lineage: test_src PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_src PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table test_src partition(ds='102') select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_src@ds=102
+POSTHOOK: query: insert overwrite table test_src partition(ds='102') select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_src@ds=102
+POSTHOOK: Lineage: test_src PARTITION(ds=102).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_src PARTITION(ds=102).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+insert overwrite table dest1 select key from test_src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table dest1 select key from test_src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: test_src
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table dest1 select key from test_src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_src
+PREHOOK: Input: default@test_src@ds=101
+PREHOOK: Input: default@test_src@ds=102
+PREHOOK: Output: default@dest1
+POSTHOOK: query: insert overwrite table dest1 select key from test_src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_src
+POSTHOOK: Input: default@test_src@ds=101
+POSTHOOK: Input: default@test_src@ds=102
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ]
+PREHOOK: query: explain
+insert overwrite table dest1 select key from test_src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table dest1 select key from test_src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: test_src
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.dest1
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table dest1 select key from test_src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_src
+PREHOOK: Input: default@test_src@ds=101
+PREHOOK: Input: default@test_src@ds=102
+PREHOOK: Output: default@dest1
+POSTHOOK: query: insert overwrite table dest1 select key from test_src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_src
+POSTHOOK: Input: default@test_src@ds=101
+POSTHOOK: Input: default@test_src@ds=102
+POSTHOOK: Output: default@dest1
+POSTHOOK: Lineage: dest1.key SIMPLE [(test_src)test_src.FieldSchema(name:key, type:string, comment:null), ]
[27/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out
new file mode 100644
index 0000000..356aafc
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out
@@ -0,0 +1,2411 @@
+PREHOOK: query: create table over1k(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ row format delimited
+ fields terminated by '|'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k
+POSTHOOK: query: create table over1k(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ row format delimited
+ fields terminated by '|'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k
+PREHOOK: query: load data local inpath '../../data/files/over1k' into table over1k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@over1k
+POSTHOOK: query: load data local inpath '../../data/files/over1k' into table over1k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@over1k
+PREHOOK: query: create table over1k_part(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part
+POSTHOOK: query: create table over1k_part(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_part
+PREHOOK: query: create table over1k_part_limit like over1k_part
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_limit
+POSTHOOK: query: create table over1k_part_limit like over1k_part
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_part_limit
+PREHOOK: query: create table over1k_part_buck(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si) into 4 buckets
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_buck
+POSTHOOK: query: create table over1k_part_buck(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si) into 4 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_part_buck
+PREHOOK: query: create table over1k_part_buck_sort(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si)
+ sorted by (f) into 4 buckets
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part_buck_sort
+POSTHOOK: query: create table over1k_part_buck_sort(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si)
+ sorted by (f) into 4 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_part_buck_sort
+PREHOOK: query: -- map-only jobs converted to map-reduce job by hive.optimize.sort.dynamic.partition optimization
+explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-only jobs converted to map-reduce job by hive.optimize.sort.dynamic.partition optimization
+explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint)
+ sort order: +
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint)
+ sort order: +
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_limit
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_limit
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), '_bucket_number' (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint), VALUE._bucket_number (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _bucket_number
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_buck
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_buck
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float)
+ sort order: +++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint), VALUE._bucket_number (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _bucket_number
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_buck_sort
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_buck_sort
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k_part@ds=foo
+POSTHOOK: query: insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k_part@ds=foo/t=27
+POSTHOOK: Output: default@over1k_part@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=27).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=27).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=27).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=27).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k_part_limit@ds=foo
+POSTHOOK: query: insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k_part_limit@ds=foo/t=27
+POSTHOOK: Output: default@over1k_part_limit@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=27).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=27).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=27).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=27).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k_part_buck
+POSTHOOK: query: insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k_part_buck@t=27
+POSTHOOK: Output: default@over1k_part_buck@t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=27).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=27).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=27).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=27).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k_part_buck_sort
+POSTHOOK: query: insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k_part_buck_sort@t=27
+POSTHOOK: Output: default@over1k_part_buck_sort@t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=27).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=27).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=27).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=27).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization
+explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization
+explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint)
+ sort order: +
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint)
+ sort order: +
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_limit
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_limit
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), '_bucket_number' (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint), VALUE._bucket_number (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _bucket_number
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_buck
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ t
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_buck
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float)
+ sort order: +++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint), VALUE._bucket_number (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _bucket_number
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_buck_sort
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ t
+ replace: false
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part_buck_sort
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k_part@ds=foo
+POSTHOOK: query: insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k_part@ds=foo/t=27
+POSTHOOK: Output: default@over1k_part@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=27).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=27).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=27).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=27).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k_part_limit@ds=foo
+POSTHOOK: query: insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k_part_limit@ds=foo/t=27
+POSTHOOK: Output: default@over1k_part_limit@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=27).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=27).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=27).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=27).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_limit PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k_part_buck
+POSTHOOK: query: insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k_part_buck@t=27
+POSTHOOK: Output: default@over1k_part_buck@t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=27).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=27).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=27).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=27).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck PARTITION(t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k_part_buck_sort
+POSTHOOK: query: insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k_part_buck_sort@t=27
+POSTHOOK: Output: default@over1k_part_buck_sort@t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=27).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=27).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=27).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=27).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: desc formatted over1k_part partition(ds="foo",t=27)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part
+POSTHOOK: query: desc formatted over1k_part partition(ds="foo",t=27)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [foo, 27]
+Database: default
+Table: over1k_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 2
+ numRows 32
+ rawDataSize 830
+ totalSize 862
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part
+POSTHOOK: query: desc formatted over1k_part partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [foo, __HIVE_DEFAULT_PARTITION__]
+Database: default
+Table: over1k_part
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 2
+ numRows 6
+ rawDataSize 156
+ totalSize 162
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_limit partition(ds="foo",t=27)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_limit
+POSTHOOK: query: desc formatted over1k_part_limit partition(ds="foo",t=27)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_limit
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [foo, 27]
+Database: default
+Table: over1k_part_limit
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 2
+ numRows 14
+ rawDataSize 362
+ totalSize 376
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_limit partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_limit
+POSTHOOK: query: desc formatted over1k_part_limit partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_limit
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [foo, __HIVE_DEFAULT_PARTITION__]
+Database: default
+Table: over1k_part_limit
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 2
+ numRows 6
+ rawDataSize 156
+ totalSize 162
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_buck partition(t=27)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_buck
+POSTHOOK: query: desc formatted over1k_part_buck partition(t=27)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_buck
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [27]
+Database: default
+Table: over1k_part_buck
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 8
+ numRows 32
+ rawDataSize 830
+ totalSize 862
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [si]
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_buck partition(t="__HIVE_DEFAULT_PARTITION__")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_buck
+POSTHOOK: query: desc formatted over1k_part_buck partition(t="__HIVE_DEFAULT_PARTITION__")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_buck
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [__HIVE_DEFAULT_PARTITION__]
+Database: default
+Table: over1k_part_buck
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 8
+ numRows 6
+ rawDataSize 156
+ totalSize 162
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [si]
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_buck_sort partition(t=27)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_buck_sort
+POSTHOOK: query: desc formatted over1k_part_buck_sort partition(t=27)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_buck_sort
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [27]
+Database: default
+Table: over1k_part_buck_sort
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 8
+ numRows 32
+ rawDataSize 830
+ totalSize 862
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [si]
+Sort Columns: [Order(col:f, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted over1k_part_buck_sort partition(t="__HIVE_DEFAULT_PARTITION__")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part_buck_sort
+POSTHOOK: query: desc formatted over1k_part_buck_sort partition(t="__HIVE_DEFAULT_PARTITION__")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part_buck_sort
+# col_name data_type comment
+
+si smallint
+i int
+b bigint
+f float
+
+# Partition Information
+# col_name data_type comment
+
+t tinyint
+
+# Detailed Partition Information
+Partition Value: [__HIVE_DEFAULT_PARTITION__]
+Database: default
+Table: over1k_part_buck_sort
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 8
+ numRows 6
+ rawDataSize 156
+ totalSize 162
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [si]
+Sort Columns: [Order(col:f, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: select count(*) from over1k_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_part
+PREHOOK: Input: default@over1k_part@ds=foo/t=27
+PREHOOK: Input: default@over1k_part@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from over1k_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_part
+POSTHOOK: Input: default@over1k_part@ds=foo/t=27
+POSTHOOK: Input: default@over1k_part@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+38
+PREHOOK: query: select count(*) from over1k_part_limit
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_part_limit
+PREHOOK: Input: default@over1k_part_limit@ds=foo/t=27
+PREHOOK: Input: default@over1k_part_limit@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from over1k_part_limit
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_part_limit
+POSTHOOK: Input: default@over1k_part_limit@ds=foo/t=27
+POSTHOOK: Input: default@over1k_part_limit@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+20
+PREHOOK: query: select count(*) from over1k_part_buck
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_part_buck
+PREHOOK: Input: default@over1k_part_buck@t=27
+PREHOOK: Input: default@over1k_part_buck@t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from over1k_part_buck
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_part_buck
+POSTHOOK: Input: default@over1k_part_buck@t=27
+POSTHOOK: Input: default@over1k_part_buck@t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+38
+PREHOOK: query: select count(*) from over1k_part_buck_sort
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k_part_buck_sort
+PREHOOK: Input: default@over1k_part_buck_sort@t=27
+PREHOOK: Input: default@over1k_part_buck_sort@t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from over1k_part_buck_sort
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k_part_buck_sort
+POSTHOOK: Input: default@over1k_part_buck_sort@t=27
+POSTHOOK: Input: default@over1k_part_buck_sort@t=__HIVE_DEFAULT_PARTITION__
+#### A masked pattern was here ####
+38
+PREHOOK: query: -- tests for HIVE-6883
+create table over1k_part2(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k_part2
+POSTHOOK: query: -- tests for HIVE-6883
+create table over1k_part2(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k_part2
+PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: int)
+ sort order: +
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: float), VALUE._col3 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: int)
+ sort order: +
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: float), VALUE._col3 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint), _col1 (type: int)
+ sort order: ++
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: int)
+ sort order: +
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Limit
+ Number of rows: 10
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (_col0 is null or (_col0 = 27)) (type: boolean)
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint)
+ sort order: +
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ sort order: +++++
+ Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t
+PREHOOK: type: QUERY
+POSTHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: over1k
+ Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (t is null or (t = 27)) (type: boolean)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ sort order: +++++
+ Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col4 (type: tinyint)
+ sort order: +
+ Map-reduce partition columns: _col4 (type: tinyint)
+ Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds foo
+ t
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.over1k_part2
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1k_part2@ds=foo
+POSTHOOK: query: insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1k_part2@ds=foo/t=27
+POSTHOOK: Output: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: over1k_part2 PARTITION(ds=foo,t=27).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part2 PARTITION(ds=foo,t=27).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part2 PARTITION(ds=foo,t=27).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part2 PARTITION(ds=foo,t=27).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1k_part2 PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1k_part2 PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1k_part2 PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1k_part2 PARTITION(ds=foo,t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+PREHOOK: query: desc formatted over1k_part2 partition(ds="foo",t=27)
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@over1k_part2
+POSTHOOK: query: desc formatted over1k_part2 partition(ds="foo",t=27)
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@over1k_part2
+# col_name data_type
<TRUNCATED>
[32/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/count.q.out b/ql/src/test/results/clientpositive/llap/count.q.out
new file mode 100644
index 0000000..d59b063
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/count.q.out
@@ -0,0 +1,298 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+create table abcd (a int, b int, c int, d int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@abcd
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+create table abcd (a int, b int, c int, d int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@abcd
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@abcd
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@abcd
+PREHOOK: query: select * from abcd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+#### A masked pattern was here ####
+POSTHOOK: query: select * from abcd
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+#### A masked pattern was here ####
+10 100 45 4
+10 100 NULL 5
+10 1000 50 1
+100 100 10 3
+12 100 75 7
+12 NULL 80 2
+NULL 35 23 6
+PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: abcd
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: a (type: int), b (type: int), c (type: int), d (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(DISTINCT _col1), count(DISTINCT _col2), sum(_col3)
+ keys: _col0 (type: int), _col1 (type: int), _col2 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
+ sort order: +++
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0), sum(VALUE._col2)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+#### A masked pattern was here ####
+POSTHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+#### A masked pattern was here ####
+10 2 2 10
+100 1 1 3
+12 1 2 9
+NULL 1 1 6
+PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: abcd
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: a (type: int), b (type: int), c (type: int), d (type: int)
+ outputColumnNames: _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1), count(), count(_col1), count(_col2), count(_col3), count(_col4), count(DISTINCT _col1), count(DISTINCT _col2), count(DISTINCT _col3), count(DISTINCT _col4), count(DISTINCT _col1, _col2), count(DISTINCT _col2, _col3), count(DISTINCT _col3, _col4), count(DISTINCT _col1, _col4), count(DISTINCT _col1, _col3), count(DISTINCT _col2, _col4), count(DISTINCT _col1, _col2, _col3), count(DISTINCT _col2, _col3, _col4), count(DISTINCT _col1, _col3, _col4), count(DISTINCT _col1, _col2, _col4), count(DISTINCT _col1, _col2, _col3, _col4)
+ keys: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: int)
+ sort order: ++++
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0), count(VALUE._col1), count(VALUE._col2), count(VALUE._col3), count(VALUE._col4), count(VALUE._col5), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY.
_col0:14._col3)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+ Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+#### A masked pattern was here ####
+7 7 6 6 6 7 3 3 6 7 4 5 6 6 5 6 4 5 5 5 4
+PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: abcd
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: a (type: int), b (type: int), c (type: int), d (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
+ sort order: +++
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col3 (type: int)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0), sum(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: complete
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+#### A masked pattern was here ####
+POSTHOOK: query: select a, count(distinct b), count(distinct c), sum(d) from abcd group by a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+#### A masked pattern was here ####
+10 2 2 10
+100 1 1 3
+12 1 2 9
+NULL 1 1 6
+PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: abcd
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: a (type: int), b (type: int), c (type: int), d (type: int)
+ outputColumnNames: _col1, _col2, _col3, _col4
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int)
+ sort order: ++++
+ Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(1), count(), count(KEY._col0:0._col0), count(KEY._col0:1._col0), count(KEY._col0:2._col0), count(KEY._col0:3._col0), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, K
EY._col0:14._col3)
+ mode: complete
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
+ Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@abcd
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@abcd
+#### A masked pattern was here ####
+7 7 6 6 6 7 3 3 6 7 4 5 6 6 5 6 4 5 5 5 4
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/create_merge_compressed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/create_merge_compressed.q.out b/ql/src/test/results/clientpositive/llap/create_merge_compressed.q.out
new file mode 100644
index 0000000..58f517c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/create_merge_compressed.q.out
@@ -0,0 +1,138 @@
+PREHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tgt_rc_merge_test
+PREHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: query: insert into table tgt_rc_merge_test select * from src_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: Lineage: tgt_rc_merge_test.key SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tgt_rc_merge_test.value SIMPLE [(src_rc_merge_test)src_rc_merge_test.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: show table extended like `tgt_rc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `tgt_rc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:tgt_rc_merge_test
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:2
+totalFileSize:342
+maxFileSize:171
+minFileSize:171
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from tgt_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from tgt_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+10
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+46 -751895388
+PREHOOK: query: alter table tgt_rc_merge_test concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@tgt_rc_merge_test
+PREHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: query: alter table tgt_rc_merge_test concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@tgt_rc_merge_test
+POSTHOOK: Output: default@tgt_rc_merge_test
+PREHOOK: query: show table extended like `tgt_rc_merge_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `tgt_rc_merge_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:tgt_rc_merge_test
+#### A masked pattern was here ####
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:1
+totalFileSize:243
+maxFileSize:243
+minFileSize:243
+#### A masked pattern was here ####
+
+PREHOOK: query: select count(1) from tgt_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from tgt_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+10
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tgt_rc_merge_test
+#### A masked pattern was here ####
+46 -751895388
+PREHOOK: query: drop table src_rc_merge_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_rc_merge_test
+PREHOOK: Output: default@src_rc_merge_test
+POSTHOOK: query: drop table src_rc_merge_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_rc_merge_test
+POSTHOOK: Output: default@src_rc_merge_test
+PREHOOK: query: drop table tgt_rc_merge_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@tgt_rc_merge_test
+PREHOOK: Output: default@tgt_rc_merge_test
+POSTHOOK: query: drop table tgt_rc_merge_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@tgt_rc_merge_test
+POSTHOOK: Output: default@tgt_rc_merge_test
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cross_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cross_join.q.out b/ql/src/test/results/clientpositive/llap/cross_join.q.out
new file mode 100644
index 0000000..6ff8c2d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cross_join.q.out
@@ -0,0 +1,214 @@
+Warning: Shuffle Join MERGEJOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: -- current
+explain select src.key from src join src src2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- current
+explain select src.key from src join src src2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Shuffle Join MERGEJOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: -- ansi cross join
+explain select src.key from src cross join src src2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- ansi cross join
+explain select src.key from src cross join src src2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: -- appending condition is allowed
+explain select src.key from src cross join src src2 on src.key=src2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- appending condition is allowed
+explain select src.key from src cross join src src2 on src.key=src2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cross_product_check_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/llap/cross_product_check_1.q.out
new file mode 100644
index 0000000..5556478
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cross_product_check_1.q.out
@@ -0,0 +1,575 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table A as
+select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@A
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table A as
+select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@A
+PREHOOK: query: create table B as
+select * from src
+limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@B
+POSTHOOK: query: create table B as
+select * from src
+limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@B
+Warning: Shuffle Join MERGEJOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: explain select * from A join B
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from A join B
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Shuffle Join MERGEJOIN[18][tables = [d1, d2, a]] in Stage 'Reducer 3' is a cross product
+PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: d2
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: string), value (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Shuffle Join MERGEJOIN[24][tables = [a, od1]] in Stage 'Reducer 4' is a cross product
+PREHOOK: query: explain select * from A join
+ (select d1.key
+ from B d1 join B d2 on d1.key = d2.key
+ where 1 = 1 group by d1.key) od1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from A join
+ (select d1.key
+ from B d1 join B d2 on d1.key = d2.key
+ where 1 = 1 group by d1.key) od1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Map 6 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: d2
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: string), value (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Reducer 4
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col5
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Shuffle Join MERGEJOIN[17][tables = [d1, d2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[18][tables = [a, od1]] in Stage 'Reducer 4' is a cross product
+PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Map 6 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: string)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: d2
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: key (type: string), value (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0
+ Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 5 Data size: 51 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 5 Data size: 51 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Reducer 4
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col5
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Shuffle Join MERGEJOIN[29][tables = [ss, od1]] in Stage 'Reducer 3' is a cross product
+PREHOOK: query: explain select * from
+(select A.key from A group by key) ss join
+(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from
+(select A.key from A group by key) ss join
+(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
+ Reducer 5 <- Map 4 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+ Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: key
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: key (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 7
+ Map Operator Tree:
+ TableScan
+ alias: d2
+ Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 5
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ Reducer 6
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/llap/cross_product_check_2.q.out
new file mode 100644
index 0000000..c2a6b94
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cross_product_check_2.q.out
@@ -0,0 +1,534 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table A as
+select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@A
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table A as
+select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@A
+PREHOOK: query: create table B as
+select * from src order by key
+limit 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@B
+POSTHOOK: query: create table B as
+select * from src order by key
+limit 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@B
+Warning: Map Join MAPJOIN[11][bigTable=?] in task 'Map 1' is a cross product
+PREHOOK: query: explain select * from A join B
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from A join B
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col2, _col3
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string)
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Map Join MAPJOIN[18][bigTable=a] in task 'Map 3' is a cross product
+PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+ Map 3 <- Map 1 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ outputColumnNames: _col0, _col1, _col5, _col6
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: d2
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Map Join MAPJOIN[24][bigTable=a] in task 'Map 4' is a cross product
+PREHOOK: query: explain select * from A join
+ (select d1.key
+ from B d1 join B d2 on d1.key = d2.key
+ where 1 = 1 group by d1.key) od1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from A join
+ (select d1.key
+ from B d1 join B d2 on d1.key = d2.key
+ where 1 = 1 group by d1.key) od1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Map 4 <- Reducer 2 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: d2
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col5
+ input vertices:
+ 1 Reducer 2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Map Join MAPJOIN[17][bigTable=d1] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[18][bigTable=a] in task 'Map 4' is a cross product
+PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Map 4 <- Reducer 2 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 11 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 11 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 11 Data size: 105 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: d2
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1, _col5
+ input vertices:
+ 1 Reducer 2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 5 Data size: 47 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 5 Data size: 47 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+Warning: Map Join MAPJOIN[29][bigTable=?] in task 'Reducer 2' is a cross product
+PREHOOK: query: explain select * from
+(select A.key from A group by key) ss join
+(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from
+(select A.key from A group by key) ss join
+(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 3 <- Map 5 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (BROADCAST_EDGE)
+ Reducer 4 <- Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: key
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: key (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: d1
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 5
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: d2
+ Statistics: Num rows: 10 Data size: 96 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 5 Data size: 48 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0
+ 1
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 1 Reducer 4
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 4
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
[30/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
new file mode 100644
index 0000000..5a67250
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
@@ -0,0 +1,5352 @@
+PREHOOK: query: select distinct ds from srcpart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select distinct ds from srcpart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+2008-04-08
+2008-04-09
+PREHOOK: query: select distinct hr from srcpart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select distinct hr from srcpart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+11
+12
+PREHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-4 depends on stages: Stage-2, Stage-0
+ Stage-3 depends on stages: Stage-4
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ds (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string), _col0 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart_date
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-4
+ Create Table Operator:
+ Create Table
+ columns: ds string, date string
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+ serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart_date
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-0
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcpart_date
+POSTHOOK: query: create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcpart_date
+PREHOOK: query: create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcpart_hour
+POSTHOOK: query: create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcpart_hour
+PREHOOK: query: create table srcpart_date_hour as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcpart_date_hour
+POSTHOOK: query: create table srcpart_date_hour as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcpart_date_hour
+PREHOOK: query: create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcpart_double_hour
+POSTHOOK: query: create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcpart_double_hour
+PREHOOK: query: -- single column, single key
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- single column, single key
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: ds is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_date
+ filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean)
+ Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ds (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: srcpart
+ Partition key expr: ds
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Target column: ds
+ Target Vertex: Map 1
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 ds (type: string)
+ 1 ds (type: string)
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_date
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_date
+#### A masked pattern was here ####
+1000
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: ds is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_date
+ filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean)
+ Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 ds (type: string)
+ 1 ds (type: string)
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_date
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_date
+#### A masked pattern was here ####
+1000
+PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+1000
+PREHOOK: query: -- multiple sources, single key
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
+PREHOOK: type: QUERY
+POSTHOOK: query: -- multiple sources, single key
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Map 6 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ value expressions: hr (type: string)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_date
+ filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean)
+ Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ds (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: srcpart
+ Partition key expr: ds
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Target column: ds
+ Target Vertex: Map 1
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_hour
+ filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (hr is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: hr (type: string)
+ sort order: +
+ Map-reduce partition columns: hr (type: string)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hr (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: srcpart
+ Partition key expr: hr
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Target column: hr
+ Target Vertex: Map 1
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 ds (type: string)
+ 1 ds (type: string)
+ outputColumnNames: _col3
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col3 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col3 (type: string)
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col3 (type: string)
+ 1 hr (type: string)
+ Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_date
+PREHOOK: Input: default@srcpart_hour
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_date
+POSTHOOK: Input: default@srcpart_hour
+#### A masked pattern was here ####
+500
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Map 6 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: (ds is not null and hr is not null) (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ value expressions: hr (type: string)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_date
+ filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean)
+ Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_hour
+ filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (hr is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: hr (type: string)
+ sort order: +
+ Map-reduce partition columns: hr (type: string)
+ Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 ds (type: string)
+ 1 ds (type: string)
+ outputColumnNames: _col3
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col3 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col3 (type: string)
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col3 (type: string)
+ 1 hr (type: string)
+ Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_date
+PREHOOK: Input: default@srcpart_hour
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_date
+POSTHOOK: Input: default@srcpart_hour
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from srcpart where hr = 11 and ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart where hr = 11 and ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+500
+PREHOOK: query: -- multiple columns single source
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+PREHOOK: type: QUERY
+POSTHOOK: query: -- multiple columns single source
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string), hr (type: string)
+ sort order: ++
+ Map-reduce partition columns: ds (type: string), hr (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_date_hour
+ filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string), hr (type: string)
+ sort order: ++
+ Map-reduce partition columns: ds (type: string), hr (type: string)
+ Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ds (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: srcpart
+ Partition key expr: ds
+ Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Target column: ds
+ Target Vertex: Map 1
+ Select Operator
+ expressions: hr (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: srcpart
+ Partition key expr: hr
+ Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Target column: hr
+ Target Vertex: Map 1
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 ds (type: string), hr (type: string)
+ 1 ds (type: string), hr (type: string)
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_date_hour
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_date_hour
+#### A masked pattern was here ####
+500
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: (ds is not null and hr is not null) (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string), hr (type: string)
+ sort order: ++
+ Map-reduce partition columns: ds (type: string), hr (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_date_hour
+ filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string), hr (type: string)
+ sort order: ++
+ Map-reduce partition columns: ds (type: string), hr (type: string)
+ Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 ds (type: string), hr (type: string)
+ 1 ds (type: string), hr (type: string)
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_date_hour
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_date_hour
+#### A masked pattern was here ####
+500
+PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' and hr = 11
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' and hr = 11
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+500
+PREHOOK: query: -- empty set
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- empty set
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: ds is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_date
+ filterExpr: (ds is not null and (date = 'I DONT EXIST')) (type: boolean)
+ Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: ds (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: srcpart
+ Partition key expr: ds
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Target column: ds
+ Target Vertex: Map 1
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 ds (type: string)
+ 1 ds (type: string)
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_date
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_date
+#### A masked pattern was here ####
+0
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: ds is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_date
+ filterExpr: (ds is not null and (date = 'I DONT EXIST')) (type: boolean)
+ Statistics: Num rows: 2 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: ds (type: string)
+ sort order: +
+ Map-reduce partition columns: ds (type: string)
+ Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 ds (type: string)
+ 1 ds (type: string)
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_date
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_date
+#### A masked pattern was here ####
+0
+PREHOOK: query: select count(*) from srcpart where ds = 'I DONT EXIST'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart where ds = 'I DONT EXIST'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+0
+PREHOOK: query: -- expressions
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+PREHOOK: type: QUERY
+POSTHOOK: query: -- expressions
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: UDFToDouble(hr) is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: UDFToDouble(hr) is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: UDFToDouble(hr) (type: double)
+ sort order: +
+ Map-reduce partition columns: UDFToDouble(hr) (type: double)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_double_hour
+ filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+ sort order: +
+ Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: double)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: srcpart
+ Partition key expr: UDFToDouble(hr)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Target column: hr
+ Target Vertex: Map 1
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 UDFToDouble(hr) (type: double)
+ 1 UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_double_hour
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_double_hour
+#### A masked pattern was here ####
+1000
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: (hr * 2) is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (hr * 2) is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: (hr * 2) (type: double)
+ sort order: +
+ Map-reduce partition columns: (hr * 2) (type: double)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_double_hour
+ filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (hr is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: hr (type: double)
+ sort order: +
+ Map-reduce partition columns: hr (type: double)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hr (type: double)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: double)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: srcpart
+ Partition key expr: (hr * 2)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Target column: hr
+ Target Vertex: Map 1
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 (hr * 2) (type: double)
+ 1 hr (type: double)
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_double_hour
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_double_hour
+#### A masked pattern was here ####
+1000
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: UDFToDouble(hr) is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: UDFToDouble(hr) is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: UDFToDouble(hr) (type: double)
+ sort order: +
+ Map-reduce partition columns: UDFToDouble(hr) (type: double)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_double_hour
+ filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+ sort order: +
+ Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 UDFToDouble(hr) (type: double)
+ 1 UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_double_hour
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_double_hour
+#### A masked pattern was here ####
+1000
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: (hr * 2) is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (hr * 2) is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: (hr * 2) (type: double)
+ sort order: +
+ Map-reduce partition columns: (hr * 2) (type: double)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_double_hour
+ filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (hr is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: hr (type: double)
+ sort order: +
+ Map-reduce partition columns: hr (type: double)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 (hr * 2) (type: double)
+ 1 hr (type: double)
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Input: default@srcpart_double_hour
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Input: default@srcpart_double_hour
+#### A masked pattern was here ####
+1000
+PREHOOK: query: select count(*) from srcpart where hr = 11
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart where hr = 11
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+#### A masked pattern was here ####
+1000
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ filterExpr: UDFToString((hr * 2)) is not null (type: boolean)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: UDFToString((hr * 2)) is not null (type: boolean)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: UDFToString((hr * 2)) (type: string)
+ sort order: +
+ Map-reduce partition columns: UDFToString((hr * 2)) (type: string)
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: srcpart_double_hour
+ filterExpr: (UDFToString(hr) is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (UDFToString(hr) is not null and (hour = 11)) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: UDFToString(hr) (type: string)
+ sort order: +
+ Map-reduce partition columns: UDFToString(hr) (type: string)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: UDFToString(hr) (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Dynamic Partitioning Event Operator
+ Target Input: srcpart
+ Partition key expr: UDFToString((hr * 2))
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Target column: hr
+ Target Vertex: Map 1
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 UDFToString((hr * 2)) (type: string)
+ 1 UDFToString(hr) (type: string)
+ Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats:
<TRUNCATED>
[02/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/ptf_streaming.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/ptf_streaming.q.out b/ql/src/test/results/clientpositive/llap/ptf_streaming.q.out
new file mode 100644
index 0000000..a4fed1e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/ptf_streaming.q.out
@@ -0,0 +1,2640 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+--1. test1
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on part
+ partition by p_mfgr
+ order by p_name
+ )
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+--1. test1
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on part
+ partition by p_mfgr
+ order by p_name
+ )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int), p_retailprice (type: double)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopstreaming
+ order by: _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: dense_rank_window_1
+ arguments: _col1
+ name: dense_rank
+ window function: GenericUDAFDenseRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: sum_window_2
+ arguments: _col7
+ name: sum
+ window function: GenericUDAFSumDouble
+ window frame: PRECEDING(MAX)~
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), sum_window_2 (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on part
+ partition by p_mfgr
+ order by p_name
+ )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on part
+ partition by p_mfgr
+ order by p_name
+ )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
+Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001
+Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001
+Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68
+Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38
+Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001
+Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62
+Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68
+Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95
+Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34
+Manufacturer#3 almond antique misty red olive 1 4 4 6195.32
+Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61
+Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67
+Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27
+Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001
+Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69
+Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004
+Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18
+Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66
+PREHOOK: query: -- 2. testJoinWithNoop
+explain
+select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noopstreaming (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 2. testJoinWithNoop
+explain
+select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noopstreaming (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: p1
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: p_partkey is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_partkey (type: int)
+ sort order: +
+ Map-reduce partition columns: p_partkey (type: int)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: p2
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: p_partkey is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_partkey (type: int)
+ sort order: +
+ Map-reduce partition columns: p_partkey (type: int)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 p_partkey (type: int)
+ 1 p_partkey (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: j
+ output shape: _col1: string, _col2: string, _col5: int
+ type: SUBQUERY
+ Partition table definition
+ input alias: ptf_1
+ name: noopstreaming
+ order by: _col1
+ output shape: _col1: string, _col2: string, _col5: int
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: lag_window_0
+ arguments: _col5, 1, _col5
+ name: lag
+ window function: GenericUDAFLagEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), (_col5 - lag_window_0) (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noopstreaming (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name,
+p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz
+from noopstreaming (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j
+distribute by j.p_mfgr
+sort by j.p_name)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 0
+Manufacturer#1 almond antique burnished rose metallic 2 0
+Manufacturer#1 almond antique burnished rose metallic 2 0
+Manufacturer#1 almond antique burnished rose metallic 2 0
+Manufacturer#1 almond antique chartreuse lavender yellow 34 32
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 -28
+Manufacturer#1 almond aquamarine burnished black steel 28 22
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 14
+Manufacturer#2 almond antique violet chocolate turquoise 14 0
+Manufacturer#2 almond antique violet turquoise frosted 40 26
+Manufacturer#2 almond aquamarine midnight light salmon 2 -38
+Manufacturer#2 almond aquamarine rose maroon antique 25 23
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 -7
+Manufacturer#3 almond antique chartreuse khaki white 17 0
+Manufacturer#3 almond antique forest lavender goldenrod 14 -3
+Manufacturer#3 almond antique metallic orange dim 19 5
+Manufacturer#3 almond antique misty red olive 1 -18
+Manufacturer#3 almond antique olive coral navajo 45 44
+Manufacturer#4 almond antique gainsboro frosted violet 10 0
+Manufacturer#4 almond antique violet mint lemon 39 29
+Manufacturer#4 almond aquamarine floral ivory bisque 27 -12
+Manufacturer#4 almond aquamarine yellow dodger mint 7 -20
+Manufacturer#4 almond azure aquamarine papaya violet 12 5
+Manufacturer#5 almond antique blue firebrick mint 31 0
+Manufacturer#5 almond antique medium spring khaki 6 -25
+Manufacturer#5 almond antique sky peru orange 2 -4
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 44
+Manufacturer#5 almond azure blanched chiffon midnight 23 -23
+PREHOOK: query: -- 7. testJoin
+explain
+select abc.*
+from noopstreaming(on part
+partition by p_mfgr
+order by p_name
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 7. testJoin
+explain
+select abc.*
+from noopstreaming(on part
+partition by p_mfgr
+order by p_name
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Map 4 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_partkey (type: int), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: p1
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: p_partkey is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_partkey (type: int)
+ sort order: +
+ Map-reduce partition columns: p_partkey (type: int)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: double), VALUE._col6 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string
+ type: TABLE
+ Partition table definition
+ input alias: abc
+ name: noopstreaming
+ order by: _col1
+ output shape: _col0: int, _col1: string, _col2: string, _col3: string, _col4: string, _col5: int, _col6: string, _col7: double, _col8: string
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: _col0 is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 p_partkey (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select abc.*
+from noopstreaming(on part
+partition by p_mfgr
+order by p_name
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select abc.*
+from noopstreaming(on part
+partition by p_mfgr
+order by p_name
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+105685 almond antique violet chocolate turquoise Manufacturer#2 Brand#22 MEDIUM ANODIZED COPPER 14 MED CAN 1690.68 ly pending requ
+110592 almond antique salmon chartreuse burlywood Manufacturer#1 Brand#15 PROMO BURNISHED NICKEL 6 JUMBO PKG 1602.59 to the furiously
+112398 almond antique metallic orange dim Manufacturer#3 Brand#32 MEDIUM BURNISHED BRASS 19 JUMBO JAR 1410.39 ole car
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+121152 almond antique burnished rose metallic Manufacturer#1 Brand#14 PROMO PLATED TIN 2 JUMBO BOX 1173.15 e pinto beans h
+132666 almond aquamarine rose maroon antique Manufacturer#2 Brand#24 SMALL POLISHED NICKEL 25 MED BOX 1698.66 even
+144293 almond antique olive coral navajo Manufacturer#3 Brand#34 STANDARD POLISHED STEEL 45 JUMBO CAN 1337.29 ag furiously about
+146985 almond aquamarine midnight light salmon Manufacturer#2 Brand#23 MEDIUM BURNISHED COPPER 2 SM CASE 2031.98 s cajole caref
+15103 almond aquamarine dodger light gainsboro Manufacturer#5 Brand#53 ECONOMY BURNISHED STEEL 46 LG PACK 1018.1 packages hinder carefu
+155733 almond antique sky peru orange Manufacturer#5 Brand#53 SMALL PLATED BRASS 2 WRAP DRUM 1788.73 furiously. bra
+17273 almond antique forest lavender goldenrod Manufacturer#3 Brand#35 PROMO ANODIZED TIN 14 JUMBO CASE 1190.27 along the
+17927 almond aquamarine yellow dodger mint Manufacturer#4 Brand#41 ECONOMY BRUSHED COPPER 7 SM PKG 1844.92 ites. eve
+191709 almond antique violet turquoise frosted Manufacturer#2 Brand#22 ECONOMY POLISHED STEEL 40 MED BOX 1800.7 haggle
+192697 almond antique blue firebrick mint Manufacturer#5 Brand#52 MEDIUM BURNISHED TIN 31 LG DRUM 1789.69 ickly ir
+195606 almond aquamarine sandy cyan gainsboro Manufacturer#2 Brand#25 STANDARD PLATED TIN 18 SM PKG 1701.6 ic de
+33357 almond azure aquamarine papaya violet Manufacturer#4 Brand#41 STANDARD ANODIZED TIN 12 WRAP CASE 1290.35 reful
+40982 almond antique misty red olive Manufacturer#3 Brand#32 ECONOMY PLATED COPPER 1 LG PKG 1922.98 c foxes can s
+42669 almond antique medium spring khaki Manufacturer#5 Brand#51 STANDARD BURNISHED TIN 6 MED CAN 1611.66 sits haggl
+45261 almond aquamarine floral ivory bisque Manufacturer#4 Brand#42 SMALL PLATED STEEL 27 WRAP CASE 1206.26 careful
+48427 almond antique violet mint lemon Manufacturer#4 Brand#42 PROMO POLISHED STEEL 39 SM CASE 1375.42 hely ironic i
+49671 almond antique gainsboro frosted violet Manufacturer#4 Brand#41 SMALL BRUSHED BRASS 10 SM BOX 1620.67 ccounts run quick
+65667 almond aquamarine pink moccasin thistle Manufacturer#1 Brand#12 LARGE BURNISHED STEEL 42 JUMBO CASE 1632.66 e across the expr
+78486 almond azure blanched chiffon midnight Manufacturer#5 Brand#52 LARGE BRUSHED BRASS 23 MED BAG 1464.48 hely blith
+85768 almond antique chartreuse lavender yellow Manufacturer#1 Brand#12 LARGE BRUSHED STEEL 34 SM BAG 1753.76 refull
+86428 almond aquamarine burnished black steel Manufacturer#1 Brand#12 STANDARD ANODIZED STEEL 28 WRAP BAG 1414.42 arefully
+90681 almond antique chartreuse khaki white Manufacturer#3 Brand#31 MEDIUM BURNISHED TIN 17 SM CASE 1671.68 are slyly after the sl
+PREHOOK: query: -- 9. testNoopWithMap
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmapstreaming(on part
+partition by p_mfgr
+order by p_name, p_size desc)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 9. testNoopWithMap
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmapstreaming(on part
+partition by p_mfgr
+order by p_name, p_size desc)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: p_name: string, p_mfgr: string, p_size: int
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmapstreaming
+ order by: p_name, p_size(DESC)
+ output shape: p_name: string, p_mfgr: string, p_size: int
+ partition by: p_mfgr
+ raw input shape:
+ transforms raw input: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Map-side function: true
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string), p_size (type: int)
+ sort order: ++-
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey2 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmapstreaming
+ order by: _col1, _col5(DESC)
+ output shape: _col1: string, _col2: string, _col5: int
+ partition by: _col2
+ raw input shape:
+ transforms raw input: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int)
+ sort order: ++-
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey2 (type: int)
+ outputColumnNames: _col1, _col2, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1, _col5(DESC)
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1, _col5
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmapstreaming(on part
+partition by p_mfgr
+order by p_name, p_size desc)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name, p_size desc) as r
+from noopwithmapstreaming(on part
+partition by p_mfgr
+order by p_name, p_size desc)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1
+Manufacturer#1 almond antique burnished rose metallic 2 1
+Manufacturer#1 almond antique chartreuse lavender yellow 34 3
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 4
+Manufacturer#1 almond aquamarine burnished black steel 28 5
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 6
+Manufacturer#2 almond antique violet chocolate turquoise 14 1
+Manufacturer#2 almond antique violet turquoise frosted 40 2
+Manufacturer#2 almond aquamarine midnight light salmon 2 3
+Manufacturer#2 almond aquamarine rose maroon antique 25 4
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5
+Manufacturer#3 almond antique chartreuse khaki white 17 1
+Manufacturer#3 almond antique forest lavender goldenrod 14 2
+Manufacturer#3 almond antique metallic orange dim 19 3
+Manufacturer#3 almond antique misty red olive 1 4
+Manufacturer#3 almond antique olive coral navajo 45 5
+Manufacturer#4 almond antique gainsboro frosted violet 10 1
+Manufacturer#4 almond antique violet mint lemon 39 2
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4
+Manufacturer#4 almond azure aquamarine papaya violet 12 5
+Manufacturer#5 almond antique blue firebrick mint 31 1
+Manufacturer#5 almond antique medium spring khaki 6 2
+Manufacturer#5 almond antique sky peru orange 2 3
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4
+Manufacturer#5 almond azure blanched chiffon midnight 23 5
+PREHOOK: query: -- 10. testNoopWithMapWithWindowing
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmapstreaming(on part
+ partition by p_mfgr
+ order by p_name)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 10. testNoopWithMapWithWindowing
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmapstreaming(on part
+ partition by p_mfgr
+ order by p_name)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmapstreaming
+ order by: p_name
+ output shape: p_name: string, p_mfgr: string, p_size: int, p_retailprice: double
+ partition by: p_mfgr
+ raw input shape:
+ transforms raw input: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Map-side function: true
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int), p_retailprice (type: double)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmapstreaming
+ order by: _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ transforms raw input: true
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: dense_rank_window_1
+ arguments: _col1
+ name: dense_rank
+ window function: GenericUDAFDenseRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: sum_window_2
+ arguments: _col7
+ name: sum
+ window function: GenericUDAFSumDouble
+ window frame: PRECEDING(MAX)~
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), sum_window_2 (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmapstreaming(on part
+ partition by p_mfgr
+ order by p_name)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopwithmapstreaming(on part
+ partition by p_mfgr
+ order by p_name)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
+Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001
+Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001
+Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68
+Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38
+Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001
+Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62
+Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68
+Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95
+Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34
+Manufacturer#3 almond antique misty red olive 1 4 4 6195.32
+Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61
+Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67
+Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27
+Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001
+Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69
+Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004
+Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18
+Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66
+PREHOOK: query: -- 12. testFunctionChain
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on noopwithmapstreaming(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 12. testFunctionChain
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on noopwithmapstreaming(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int), p_retailprice (type: double)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: PTFCOMPONENT
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmapstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ transforms raw input: true
+ Partition table definition
+ input alias: ptf_2
+ name: noopstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Map-side function: true
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: PTFCOMPONENT
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmapstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ transforms raw input: true
+ Partition table definition
+ input alias: ptf_2
+ name: noopstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: dense_rank_window_1
+ arguments: _col1
+ name: dense_rank
+ window function: GenericUDAFDenseRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: sum_window_2
+ arguments: _col7
+ name: sum
+ window function: GenericUDAFSumDouble
+ window frame: PRECEDING(MAX)~
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), sum_window_2 (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on noopwithmapstreaming(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on noopwithmapstreaming(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
+Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001
+Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001
+Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68
+Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38
+Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001
+Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62
+Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68
+Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95
+Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34
+Manufacturer#3 almond antique misty red olive 1 4 4 6195.32
+Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61
+Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67
+Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27
+Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001
+Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69
+Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004
+Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18
+Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66
+PREHOOK: query: -- 12.1 testFunctionChain
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on noopwithmap(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 12.1 testFunctionChain
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on noopwithmap(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int), p_retailprice (type: double)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: PTFCOMPONENT
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmap
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ transforms raw input: true
+ Partition table definition
+ input alias: ptf_2
+ name: noopstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Map-side function: true
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: PTFCOMPONENT
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmap
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ transforms raw input: true
+ Partition table definition
+ input alias: ptf_2
+ name: noopstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: dense_rank_window_1
+ arguments: _col1
+ name: dense_rank
+ window function: GenericUDAFDenseRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: sum_window_2
+ arguments: _col7
+ name: sum
+ window function: GenericUDAFSumDouble
+ window frame: PRECEDING(MAX)~
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), sum_window_2 (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on noopwithmap(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noopstreaming(on noopwithmap(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
+Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001
+Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001
+Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68
+Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38
+Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001
+Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62
+Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68
+Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95
+Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34
+Manufacturer#3 almond antique misty red olive 1 4 4 6195.32
+Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61
+Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67
+Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27
+Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001
+Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69
+Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004
+Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18
+Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66
+PREHOOK: query: -- 12.2 testFunctionChain
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on noopwithmapstreaming(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 12.2 testFunctionChain
+explain
+select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on noopwithmapstreaming(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_size (type: int), p_retailprice (type: double)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: part
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ name: noopstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: PTFCOMPONENT
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmapstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ transforms raw input: true
+ Partition table definition
+ input alias: ptf_2
+ name: noop
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Map-side function: true
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: PTFCOMPONENT
+ Partition table definition
+ input alias: ptf_1
+ name: noopwithmapstreaming
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ transforms raw input: true
+ Partition table definition
+ input alias: ptf_2
+ name: noop
+ order by: _col2, _col1
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ partition by: _col2
+ raw input shape:
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col2 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col2 (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col5 (type: int), _col7 (type: double)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
+ outputColumnNames: _col1, _col2, _col5, _col7
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: ptf_0
+ output shape: _col1: string, _col2: string, _col5: int, _col7: double
+ type: WINDOWING
+ Windowing table definition
+ input alias: ptf_1
+ name: windowingtablefunction
+ order by: _col1
+ partition by: _col2
+ raw input shape:
+ window functions:
+ window function definition
+ alias: rank_window_0
+ arguments: _col1
+ name: rank
+ window function: GenericUDAFRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: dense_rank_window_1
+ arguments: _col1
+ name: dense_rank
+ window function: GenericUDAFDenseRankEvaluator
+ window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+ isPivotResult: true
+ window function definition
+ alias: sum_window_2
+ arguments: _col7
+ name: sum
+ window function: GenericUDAFSumDouble
+ window frame: PRECEDING(MAX)~
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), sum_window_2 (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on noopwithmapstreaming(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select p_mfgr, p_name, p_size,
+rank() over (partition by p_mfgr order by p_name) as r,
+dense_rank() over (partition by p_mfgr order by p_name) as dr,
+sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1
+from noop(on noopwithmapstreaming(on noopstreaming(on part
+partition by p_mfgr
+order by p_mfgr, p_name
+)))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 1173.15
+Manufacturer#1 almond antique burnished rose metallic 2 1 1 2346.3
+Manufacturer#1 almond antique chartreuse lavender yellow 34 3 2 4100.06
+Manufacturer#1 almond antique salmon chartreuse burlywood 6 4 3 5702.650000000001
+Manufacturer#1 almond aquamarine burnished black steel 28 5 4 7117.070000000001
+Manufacturer#1 almond aquamarine pink moccasin thistle 42 6 5 8749.730000000001
+Manufacturer#2 almond antique violet chocolate turquoise 14 1 1 1690.68
+Manufacturer#2 almond antique violet turquoise frosted 40 2 2 3491.38
+Manufacturer#2 almond aquamarine midnight light salmon 2 3 3 5523.360000000001
+Manufacturer#2 almond aquamarine rose maroon antique 25 4 4 7222.02
+Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 5 5 8923.62
+Manufacturer#3 almond antique chartreuse khaki white 17 1 1 1671.68
+Manufacturer#3 almond antique forest lavender goldenrod 14 2 2 2861.95
+Manufacturer#3 almond antique metallic orange dim 19 3 3 4272.34
+Manufacturer#3 almond antique misty red olive 1 4 4 6195.32
+Manufacturer#3 almond antique olive coral navajo 45 5 5 7532.61
+Manufacturer#4 almond antique gainsboro frosted violet 10 1 1 1620.67
+Manufacturer#4 almond antique violet mint lemon 39 2 2 2996.09
+Manufacturer#4 almond aquamarine floral ivory bisque 27 3 3 4202.35
+Manufacturer#4 almond aquamarine yellow dodger mint 7 4 4 6047.27
+Manufacturer#4 almond azure aquamarine papaya violet 12 5 5 7337.620000000001
+Manufacturer#5 almond antique blue firebrick mint 31 1 1 1789.69
+Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004
+Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08
+Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18
+Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66
+PREHOOK: query: -- 14. testPTFJoinWithWindowingWithCount
+explain
+select abc.p_mfgr, abc.p_name,
+rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r,
+dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr,
+count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd,
+abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1,
+abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz
+from noopstreaming(on part
+partition by p_mfgr
+order by p_name
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 14. testPTFJoinWithWindowingWithCount
+explain
+select abc.p_mfgr, abc.p_name,
+rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r,
+dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr,
+count(abc.p_name) over (distribute by abc.p_mfgr sort by abc.p_name) as cd,
+abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1,
+abc.p_size, abc.p_size - lag(abc.p_size,1,abc.p_size) over (distribute by abc.p_mfgr sort by abc.p_name) as deltaSz
+from noopstreaming(on part
+partition by p_mfgr
+order by p_name
+) abc join part p1 on abc.p_partkey = p1.p_partkey
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: part
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
+ Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+ value expressions: p_partkey (type: int), p_size (type: int), p_retailprice (type: double)
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
<TRUNCATED>
[03/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/ptf_matchpath.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/ptf_matchpath.q.out b/ql/src/test/results/clientpositive/llap/ptf_matchpath.q.out
new file mode 100644
index 0000000..78e84a0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/ptf_matchpath.q.out
@@ -0,0 +1,403 @@
+PREHOOK: query: DROP TABLE flights_tiny
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE flights_tiny
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table flights_tiny (
+ORIGIN_CITY_NAME string,
+DEST_CITY_NAME string,
+YEAR int,
+MONTH int,
+DAY_OF_MONTH int,
+ARR_DELAY float,
+FL_NUM string
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@flights_tiny
+POSTHOOK: query: create table flights_tiny (
+ORIGIN_CITY_NAME string,
+DEST_CITY_NAME string,
+YEAR int,
+MONTH int,
+DAY_OF_MONTH int,
+ARR_DELAY float,
+FL_NUM string
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@flights_tiny
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@flights_tiny
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@flights_tiny
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 1. basic Matchpath test
+explain
+select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ flights_tiny
+ distribute by fl_num
+ sort by year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 1. basic Matchpath test
+explain
+select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ flights_tiny
+ distribute by fl_num
+ sort by year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: flights_tiny
+ Statistics: Num rows: 24 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: fl_num (type: string), year (type: int), month (type: int), day_of_month (type: int)
+ sort order: ++++
+ Map-reduce partition columns: fl_num (type: string)
+ Statistics: Num rows: 24 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ value expressions: origin_city_name (type: string), arr_delay (type: float)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: int), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: int), VALUE._col2 (type: float), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col2, _col3, _col4, _col5, _col6
+ Statistics: Num rows: 24 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: flights_tiny
+ output shape:
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath'
+ name: matchpath
+ order by: _col2, _col3, _col4
+ output shape: tpath: int
+ partition by: _col6
+ raw input shape:
+ referenced columns: _col0, _col6, _col2, _col3, _col4, tpath, _col5
+ Statistics: Num rows: 24 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: origin_city_name (type: string), fl_num (type: string), year (type: int), month (type: int), day_of_month (type: int), sz (type: int), tpath (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+ Statistics: Num rows: 24 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 24 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ flights_tiny
+ distribute by fl_num
+ sort by year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@flights_tiny
+#### A masked pattern was here ####
+POSTHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ flights_tiny
+ distribute by fl_num
+ sort by year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@flights_tiny
+#### A masked pattern was here ####
+Baltimore 1142 2010 10 20 6 20
+Baltimore 1142 2010 10 21 5 21
+Baltimore 1142 2010 10 22 4 22
+Baltimore 1142 2010 10 25 3 25
+Baltimore 1142 2010 10 26 2 26
+Baltimore 1599 2010 10 21 2 21
+Baltimore 1599 2010 10 25 3 25
+Baltimore 1599 2010 10 26 2 26
+Chicago 1531 2010 10 21 2 21
+Chicago 1531 2010 10 25 3 25
+Chicago 1531 2010 10 26 2 26
+Chicago 361 2010 10 20 2 20
+Chicago 897 2010 10 20 4 20
+Chicago 897 2010 10 21 3 21
+Chicago 897 2010 10 22 2 22
+Washington 7291 2010 10 27 2 27
+PREHOOK: query: -- 2. Matchpath on 1 partition
+explain
+select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ flights_tiny
+ sort by fl_num, year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+where fl_num = 1142
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 2. Matchpath on 1 partition
+explain
+select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ flights_tiny
+ sort by fl_num, year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+where fl_num = 1142
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: flights_tiny
+ Statistics: Num rows: 24 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: 0 (type: int), fl_num (type: string), year (type: int), month (type: int), day_of_month (type: int)
+ sort order: +++++
+ Map-reduce partition columns: 0 (type: int)
+ Statistics: Num rows: 24 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ value expressions: origin_city_name (type: string), arr_delay (type: float)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: int), KEY.reducesinkkey4 (type: int), VALUE._col2 (type: float), KEY.reducesinkkey1 (type: string)
+ outputColumnNames: _col0, _col2, _col3, _col4, _col5, _col6
+ Statistics: Num rows: 24 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: flights_tiny
+ output shape:
+ type: TABLE
+ Partition table definition
+ input alias: ptf_1
+ arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath'
+ name: matchpath
+ order by: _col6, _col2, _col3, _col4
+ output shape: tpath: int
+ partition by: 0
+ raw input shape:
+ referenced columns: _col0, _col6, _col2, _col3, _col4, tpath, _col5
+ Statistics: Num rows: 24 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (fl_num = 1142) (type: boolean)
+ Statistics: Num rows: 12 Data size: 2689 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: origin_city_name (type: string), '1142' (type: string), year (type: int), month (type: int), day_of_month (type: int), sz (type: int), tpath (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+ Statistics: Num rows: 12 Data size: 2689 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 12 Data size: 2689 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ flights_tiny
+ sort by fl_num, year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+where fl_num = 1142
+PREHOOK: type: QUERY
+PREHOOK: Input: default@flights_tiny
+#### A masked pattern was here ####
+POSTHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ flights_tiny
+ sort by fl_num, year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+where fl_num = 1142
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@flights_tiny
+#### A masked pattern was here ####
+Baltimore 1142 2010 10 20 6 20
+Baltimore 1142 2010 10 21 5 21
+Baltimore 1142 2010 10 22 4 22
+Baltimore 1142 2010 10 25 3 25
+Baltimore 1142 2010 10 26 2 26
+PREHOOK: query: -- 3. empty partition.
+explain
+select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ (select * from flights_tiny where fl_num = -1142) flights_tiny
+ sort by fl_num, year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3. empty partition.
+explain
+select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ (select * from flights_tiny where fl_num = -1142) flights_tiny
+ sort by fl_num, year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: flights_tiny
+ Statistics: Num rows: 44 Data size: 5379 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (fl_num = -1142) (type: boolean)
+ Statistics: Num rows: 22 Data size: 2689 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: origin_city_name (type: string), year (type: int), month (type: int), day_of_month (type: int), arr_delay (type: float)
+ outputColumnNames: _col0, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 22 Data size: 2689 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: 0 (type: int), '-1142' (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: int)
+ sort order: +++++
+ Map-reduce partition columns: 0 (type: int)
+ Statistics: Num rows: 22 Data size: 2689 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string), _col5 (type: float)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: int), KEY.reducesinkkey4 (type: int), VALUE._col2 (type: float), '-1142' (type: string)
+ outputColumnNames: _col0, _col2, _col3, _col4, _col5, _col6
+ Statistics: Num rows: 22 Data size: 2689 Basic stats: COMPLETE Column stats: NONE
+ PTF Operator
+ Function definitions:
+ Input definition
+ input alias: flights_tiny
+ output shape:
+ type: SUBQUERY
+ Partition table definition
+ input alias: ptf_1
+ arguments: 'LATE.LATE+', 'LATE', (_col5 > 15.0), 'origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath'
+ name: matchpath
+ order by: _col6, _col2, _col3, _col4
+ output shape: tpath: int
+ partition by: 0
+ raw input shape:
+ referenced columns: _col0, _col6, _col2, _col3, _col4, tpath, _col5
+ Statistics: Num rows: 22 Data size: 2689 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: origin_city_name (type: string), '-1142' (type: string), year (type: int), month (type: int), day_of_month (type: int), sz (type: int), tpath (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+ Statistics: Num rows: 22 Data size: 2689 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 22 Data size: 2689 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ (select * from flights_tiny where fl_num = -1142) flights_tiny
+ sort by fl_num, year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@flights_tiny
+#### A masked pattern was here ####
+POSTHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
+from matchpath(on
+ (select * from flights_tiny where fl_num = -1142) flights_tiny
+ sort by fl_num, year, month, day_of_month
+ arg1('LATE.LATE+'),
+ arg2('LATE'), arg3(arr_delay > 15),
+ arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath')
+ )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@flights_tiny
+#### A masked pattern was here ####
[14/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out b/ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out
new file mode 100644
index 0000000..65a3037
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/load_dyn_part3.q.out
@@ -0,0 +1,2138 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+show partitions srcpart
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@srcpart
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+show partitions srcpart
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@srcpart
+ds=2008-04-08/hr=11
+ds=2008-04-08/hr=12
+ds=2008-04-09/hr=11
+ds=2008-04-09/hr=12
+PREHOOK: query: create table if not exists nzhang_part3 like srcpart
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nzhang_part3
+POSTHOOK: query: create table if not exists nzhang_part3 like srcpart
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@nzhang_part3
+PREHOOK: query: describe extended nzhang_part3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part3
+POSTHOOK: query: describe extended nzhang_part3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part3
+key string default
+value string default
+ds string
+hr string
+
+# Partition Information
+# col_name data_type comment
+
+ds string
+hr string
+
+#### A masked pattern was here ####
+PREHOOK: query: explain
+insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: srcpart
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part3
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ ds
+ hr
+ replace: true
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.nzhang_part3
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@nzhang_part3
+POSTHOOK: query: insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@nzhang_part3@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@nzhang_part3@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@nzhang_part3@ds=2008-04-09/hr=11
+POSTHOOK: Output: default@nzhang_part3@ds=2008-04-09/hr=12
+POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_part3 PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from nzhang_part3 where ds is not null and hr is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@nzhang_part3
+PREHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=11
+PREHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=12
+PREHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=11
+PREHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from nzhang_part3 where ds is not null and hr is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@nzhang_part3
+POSTHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@nzhang_part3@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@nzhang_part3@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 11
+0 val_0 2008-04-08 12
+0 val_0 2008-04-08 12
+0 val_0 2008-04-08 12
+0 val_0 2008-04-09 11
+0 val_0 2008-04-09 11
+0 val_0 2008-04-09 11
+0 val_0 2008-04-09 12
+0 val_0 2008-04-09 12
+0 val_0 2008-04-09 12
+10 val_10 2008-04-08 11
+10 val_10 2008-04-08 12
+10 val_10 2008-04-09 11
+10 val_10 2008-04-09 12
+100 val_100 2008-04-08 11
+100 val_100 2008-04-08 11
+100 val_100 2008-04-08 12
+100 val_100 2008-04-08 12
+100 val_100 2008-04-09 11
+100 val_100 2008-04-09 11
+100 val_100 2008-04-09 12
+100 val_100 2008-04-09 12
+103 val_103 2008-04-08 11
+103 val_103 2008-04-08 11
+103 val_103 2008-04-08 12
+103 val_103 2008-04-08 12
+103 val_103 2008-04-09 11
+103 val_103 2008-04-09 11
+103 val_103 2008-04-09 12
+103 val_103 2008-04-09 12
+104 val_104 2008-04-08 11
+104 val_104 2008-04-08 11
+104 val_104 2008-04-08 12
+104 val_104 2008-04-08 12
+104 val_104 2008-04-09 11
+104 val_104 2008-04-09 11
+104 val_104 2008-04-09 12
+104 val_104 2008-04-09 12
+105 val_105 2008-04-08 11
+105 val_105 2008-04-08 12
+105 val_105 2008-04-09 11
+105 val_105 2008-04-09 12
+11 val_11 2008-04-08 11
+11 val_11 2008-04-08 12
+11 val_11 2008-04-09 11
+11 val_11 2008-04-09 12
+111 val_111 2008-04-08 11
+111 val_111 2008-04-08 12
+111 val_111 2008-04-09 11
+111 val_111 2008-04-09 12
+113 val_113 2008-04-08 11
+113 val_113 2008-04-08 11
+113 val_113 2008-04-08 12
+113 val_113 2008-04-08 12
+113 val_113 2008-04-09 11
+113 val_113 2008-04-09 11
+113 val_113 2008-04-09 12
+113 val_113 2008-04-09 12
+114 val_114 2008-04-08 11
+114 val_114 2008-04-08 12
+114 val_114 2008-04-09 11
+114 val_114 2008-04-09 12
+116 val_116 2008-04-08 11
+116 val_116 2008-04-08 12
+116 val_116 2008-04-09 11
+116 val_116 2008-04-09 12
+118 val_118 2008-04-08 11
+118 val_118 2008-04-08 11
+118 val_118 2008-04-08 12
+118 val_118 2008-04-08 12
+118 val_118 2008-04-09 11
+118 val_118 2008-04-09 11
+118 val_118 2008-04-09 12
+118 val_118 2008-04-09 12
+119 val_119 2008-04-08 11
+119 val_119 2008-04-08 11
+119 val_119 2008-04-08 11
+119 val_119 2008-04-08 12
+119 val_119 2008-04-08 12
+119 val_119 2008-04-08 12
+119 val_119 2008-04-09 11
+119 val_119 2008-04-09 11
+119 val_119 2008-04-09 11
+119 val_119 2008-04-09 12
+119 val_119 2008-04-09 12
+119 val_119 2008-04-09 12
+12 val_12 2008-04-08 11
+12 val_12 2008-04-08 11
+12 val_12 2008-04-08 12
+12 val_12 2008-04-08 12
+12 val_12 2008-04-09 11
+12 val_12 2008-04-09 11
+12 val_12 2008-04-09 12
+12 val_12 2008-04-09 12
+120 val_120 2008-04-08 11
+120 val_120 2008-04-08 11
+120 val_120 2008-04-08 12
+120 val_120 2008-04-08 12
+120 val_120 2008-04-09 11
+120 val_120 2008-04-09 11
+120 val_120 2008-04-09 12
+120 val_120 2008-04-09 12
+125 val_125 2008-04-08 11
+125 val_125 2008-04-08 11
+125 val_125 2008-04-08 12
+125 val_125 2008-04-08 12
+125 val_125 2008-04-09 11
+125 val_125 2008-04-09 11
+125 val_125 2008-04-09 12
+125 val_125 2008-04-09 12
+126 val_126 2008-04-08 11
+126 val_126 2008-04-08 12
+126 val_126 2008-04-09 11
+126 val_126 2008-04-09 12
+128 val_128 2008-04-08 11
+128 val_128 2008-04-08 11
+128 val_128 2008-04-08 11
+128 val_128 2008-04-08 12
+128 val_128 2008-04-08 12
+128 val_128 2008-04-08 12
+128 val_128 2008-04-09 11
+128 val_128 2008-04-09 11
+128 val_128 2008-04-09 11
+128 val_128 2008-04-09 12
+128 val_128 2008-04-09 12
+128 val_128 2008-04-09 12
+129 val_129 2008-04-08 11
+129 val_129 2008-04-08 11
+129 val_129 2008-04-08 12
+129 val_129 2008-04-08 12
+129 val_129 2008-04-09 11
+129 val_129 2008-04-09 11
+129 val_129 2008-04-09 12
+129 val_129 2008-04-09 12
+131 val_131 2008-04-08 11
+131 val_131 2008-04-08 12
+131 val_131 2008-04-09 11
+131 val_131 2008-04-09 12
+133 val_133 2008-04-08 11
+133 val_133 2008-04-08 12
+133 val_133 2008-04-09 11
+133 val_133 2008-04-09 12
+134 val_134 2008-04-08 11
+134 val_134 2008-04-08 11
+134 val_134 2008-04-08 12
+134 val_134 2008-04-08 12
+134 val_134 2008-04-09 11
+134 val_134 2008-04-09 11
+134 val_134 2008-04-09 12
+134 val_134 2008-04-09 12
+136 val_136 2008-04-08 11
+136 val_136 2008-04-08 12
+136 val_136 2008-04-09 11
+136 val_136 2008-04-09 12
+137 val_137 2008-04-08 11
+137 val_137 2008-04-08 11
+137 val_137 2008-04-08 12
+137 val_137 2008-04-08 12
+137 val_137 2008-04-09 11
+137 val_137 2008-04-09 11
+137 val_137 2008-04-09 12
+137 val_137 2008-04-09 12
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 11
+138 val_138 2008-04-08 12
+138 val_138 2008-04-08 12
+138 val_138 2008-04-08 12
+138 val_138 2008-04-08 12
+138 val_138 2008-04-09 11
+138 val_138 2008-04-09 11
+138 val_138 2008-04-09 11
+138 val_138 2008-04-09 11
+138 val_138 2008-04-09 12
+138 val_138 2008-04-09 12
+138 val_138 2008-04-09 12
+138 val_138 2008-04-09 12
+143 val_143 2008-04-08 11
+143 val_143 2008-04-08 12
+143 val_143 2008-04-09 11
+143 val_143 2008-04-09 12
+145 val_145 2008-04-08 11
+145 val_145 2008-04-08 12
+145 val_145 2008-04-09 11
+145 val_145 2008-04-09 12
+146 val_146 2008-04-08 11
+146 val_146 2008-04-08 11
+146 val_146 2008-04-08 12
+146 val_146 2008-04-08 12
+146 val_146 2008-04-09 11
+146 val_146 2008-04-09 11
+146 val_146 2008-04-09 12
+146 val_146 2008-04-09 12
+149 val_149 2008-04-08 11
+149 val_149 2008-04-08 11
+149 val_149 2008-04-08 12
+149 val_149 2008-04-08 12
+149 val_149 2008-04-09 11
+149 val_149 2008-04-09 11
+149 val_149 2008-04-09 12
+149 val_149 2008-04-09 12
+15 val_15 2008-04-08 11
+15 val_15 2008-04-08 11
+15 val_15 2008-04-08 12
+15 val_15 2008-04-08 12
+15 val_15 2008-04-09 11
+15 val_15 2008-04-09 11
+15 val_15 2008-04-09 12
+15 val_15 2008-04-09 12
+150 val_150 2008-04-08 11
+150 val_150 2008-04-08 12
+150 val_150 2008-04-09 11
+150 val_150 2008-04-09 12
+152 val_152 2008-04-08 11
+152 val_152 2008-04-08 11
+152 val_152 2008-04-08 12
+152 val_152 2008-04-08 12
+152 val_152 2008-04-09 11
+152 val_152 2008-04-09 11
+152 val_152 2008-04-09 12
+152 val_152 2008-04-09 12
+153 val_153 2008-04-08 11
+153 val_153 2008-04-08 12
+153 val_153 2008-04-09 11
+153 val_153 2008-04-09 12
+155 val_155 2008-04-08 11
+155 val_155 2008-04-08 12
+155 val_155 2008-04-09 11
+155 val_155 2008-04-09 12
+156 val_156 2008-04-08 11
+156 val_156 2008-04-08 12
+156 val_156 2008-04-09 11
+156 val_156 2008-04-09 12
+157 val_157 2008-04-08 11
+157 val_157 2008-04-08 12
+157 val_157 2008-04-09 11
+157 val_157 2008-04-09 12
+158 val_158 2008-04-08 11
+158 val_158 2008-04-08 12
+158 val_158 2008-04-09 11
+158 val_158 2008-04-09 12
+160 val_160 2008-04-08 11
+160 val_160 2008-04-08 12
+160 val_160 2008-04-09 11
+160 val_160 2008-04-09 12
+162 val_162 2008-04-08 11
+162 val_162 2008-04-08 12
+162 val_162 2008-04-09 11
+162 val_162 2008-04-09 12
+163 val_163 2008-04-08 11
+163 val_163 2008-04-08 12
+163 val_163 2008-04-09 11
+163 val_163 2008-04-09 12
+164 val_164 2008-04-08 11
+164 val_164 2008-04-08 11
+164 val_164 2008-04-08 12
+164 val_164 2008-04-08 12
+164 val_164 2008-04-09 11
+164 val_164 2008-04-09 11
+164 val_164 2008-04-09 12
+164 val_164 2008-04-09 12
+165 val_165 2008-04-08 11
+165 val_165 2008-04-08 11
+165 val_165 2008-04-08 12
+165 val_165 2008-04-08 12
+165 val_165 2008-04-09 11
+165 val_165 2008-04-09 11
+165 val_165 2008-04-09 12
+165 val_165 2008-04-09 12
+166 val_166 2008-04-08 11
+166 val_166 2008-04-08 12
+166 val_166 2008-04-09 11
+166 val_166 2008-04-09 12
+167 val_167 2008-04-08 11
+167 val_167 2008-04-08 11
+167 val_167 2008-04-08 11
+167 val_167 2008-04-08 12
+167 val_167 2008-04-08 12
+167 val_167 2008-04-08 12
+167 val_167 2008-04-09 11
+167 val_167 2008-04-09 11
+167 val_167 2008-04-09 11
+167 val_167 2008-04-09 12
+167 val_167 2008-04-09 12
+167 val_167 2008-04-09 12
+168 val_168 2008-04-08 11
+168 val_168 2008-04-08 12
+168 val_168 2008-04-09 11
+168 val_168 2008-04-09 12
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 11
+169 val_169 2008-04-08 12
+169 val_169 2008-04-08 12
+169 val_169 2008-04-08 12
+169 val_169 2008-04-08 12
+169 val_169 2008-04-09 11
+169 val_169 2008-04-09 11
+169 val_169 2008-04-09 11
+169 val_169 2008-04-09 11
+169 val_169 2008-04-09 12
+169 val_169 2008-04-09 12
+169 val_169 2008-04-09 12
+169 val_169 2008-04-09 12
+17 val_17 2008-04-08 11
+17 val_17 2008-04-08 12
+17 val_17 2008-04-09 11
+17 val_17 2008-04-09 12
+170 val_170 2008-04-08 11
+170 val_170 2008-04-08 12
+170 val_170 2008-04-09 11
+170 val_170 2008-04-09 12
+172 val_172 2008-04-08 11
+172 val_172 2008-04-08 11
+172 val_172 2008-04-08 12
+172 val_172 2008-04-08 12
+172 val_172 2008-04-09 11
+172 val_172 2008-04-09 11
+172 val_172 2008-04-09 12
+172 val_172 2008-04-09 12
+174 val_174 2008-04-08 11
+174 val_174 2008-04-08 11
+174 val_174 2008-04-08 12
+174 val_174 2008-04-08 12
+174 val_174 2008-04-09 11
+174 val_174 2008-04-09 11
+174 val_174 2008-04-09 12
+174 val_174 2008-04-09 12
+175 val_175 2008-04-08 11
+175 val_175 2008-04-08 11
+175 val_175 2008-04-08 12
+175 val_175 2008-04-08 12
+175 val_175 2008-04-09 11
+175 val_175 2008-04-09 11
+175 val_175 2008-04-09 12
+175 val_175 2008-04-09 12
+176 val_176 2008-04-08 11
+176 val_176 2008-04-08 11
+176 val_176 2008-04-08 12
+176 val_176 2008-04-08 12
+176 val_176 2008-04-09 11
+176 val_176 2008-04-09 11
+176 val_176 2008-04-09 12
+176 val_176 2008-04-09 12
+177 val_177 2008-04-08 11
+177 val_177 2008-04-08 12
+177 val_177 2008-04-09 11
+177 val_177 2008-04-09 12
+178 val_178 2008-04-08 11
+178 val_178 2008-04-08 12
+178 val_178 2008-04-09 11
+178 val_178 2008-04-09 12
+179 val_179 2008-04-08 11
+179 val_179 2008-04-08 11
+179 val_179 2008-04-08 12
+179 val_179 2008-04-08 12
+179 val_179 2008-04-09 11
+179 val_179 2008-04-09 11
+179 val_179 2008-04-09 12
+179 val_179 2008-04-09 12
+18 val_18 2008-04-08 11
+18 val_18 2008-04-08 11
+18 val_18 2008-04-08 12
+18 val_18 2008-04-08 12
+18 val_18 2008-04-09 11
+18 val_18 2008-04-09 11
+18 val_18 2008-04-09 12
+18 val_18 2008-04-09 12
+180 val_180 2008-04-08 11
+180 val_180 2008-04-08 12
+180 val_180 2008-04-09 11
+180 val_180 2008-04-09 12
+181 val_181 2008-04-08 11
+181 val_181 2008-04-08 12
+181 val_181 2008-04-09 11
+181 val_181 2008-04-09 12
+183 val_183 2008-04-08 11
+183 val_183 2008-04-08 12
+183 val_183 2008-04-09 11
+183 val_183 2008-04-09 12
+186 val_186 2008-04-08 11
+186 val_186 2008-04-08 12
+186 val_186 2008-04-09 11
+186 val_186 2008-04-09 12
+187 val_187 2008-04-08 11
+187 val_187 2008-04-08 11
+187 val_187 2008-04-08 11
+187 val_187 2008-04-08 12
+187 val_187 2008-04-08 12
+187 val_187 2008-04-08 12
+187 val_187 2008-04-09 11
+187 val_187 2008-04-09 11
+187 val_187 2008-04-09 11
+187 val_187 2008-04-09 12
+187 val_187 2008-04-09 12
+187 val_187 2008-04-09 12
+189 val_189 2008-04-08 11
+189 val_189 2008-04-08 12
+189 val_189 2008-04-09 11
+189 val_189 2008-04-09 12
+19 val_19 2008-04-08 11
+19 val_19 2008-04-08 12
+19 val_19 2008-04-09 11
+19 val_19 2008-04-09 12
+190 val_190 2008-04-08 11
+190 val_190 2008-04-08 12
+190 val_190 2008-04-09 11
+190 val_190 2008-04-09 12
+191 val_191 2008-04-08 11
+191 val_191 2008-04-08 11
+191 val_191 2008-04-08 12
+191 val_191 2008-04-08 12
+191 val_191 2008-04-09 11
+191 val_191 2008-04-09 11
+191 val_191 2008-04-09 12
+191 val_191 2008-04-09 12
+192 val_192 2008-04-08 11
+192 val_192 2008-04-08 12
+192 val_192 2008-04-09 11
+192 val_192 2008-04-09 12
+193 val_193 2008-04-08 11
+193 val_193 2008-04-08 11
+193 val_193 2008-04-08 11
+193 val_193 2008-04-08 12
+193 val_193 2008-04-08 12
+193 val_193 2008-04-08 12
+193 val_193 2008-04-09 11
+193 val_193 2008-04-09 11
+193 val_193 2008-04-09 11
+193 val_193 2008-04-09 12
+193 val_193 2008-04-09 12
+193 val_193 2008-04-09 12
+194 val_194 2008-04-08 11
+194 val_194 2008-04-08 12
+194 val_194 2008-04-09 11
+194 val_194 2008-04-09 12
+195 val_195 2008-04-08 11
+195 val_195 2008-04-08 11
+195 val_195 2008-04-08 12
+195 val_195 2008-04-08 12
+195 val_195 2008-04-09 11
+195 val_195 2008-04-09 11
+195 val_195 2008-04-09 12
+195 val_195 2008-04-09 12
+196 val_196 2008-04-08 11
+196 val_196 2008-04-08 12
+196 val_196 2008-04-09 11
+196 val_196 2008-04-09 12
+197 val_197 2008-04-08 11
+197 val_197 2008-04-08 11
+197 val_197 2008-04-08 12
+197 val_197 2008-04-08 12
+197 val_197 2008-04-09 11
+197 val_197 2008-04-09 11
+197 val_197 2008-04-09 12
+197 val_197 2008-04-09 12
+199 val_199 2008-04-08 11
+199 val_199 2008-04-08 11
+199 val_199 2008-04-08 11
+199 val_199 2008-04-08 12
+199 val_199 2008-04-08 12
+199 val_199 2008-04-08 12
+199 val_199 2008-04-09 11
+199 val_199 2008-04-09 11
+199 val_199 2008-04-09 11
+199 val_199 2008-04-09 12
+199 val_199 2008-04-09 12
+199 val_199 2008-04-09 12
+2 val_2 2008-04-08 11
+2 val_2 2008-04-08 12
+2 val_2 2008-04-09 11
+2 val_2 2008-04-09 12
+20 val_20 2008-04-08 11
+20 val_20 2008-04-08 12
+20 val_20 2008-04-09 11
+20 val_20 2008-04-09 12
+200 val_200 2008-04-08 11
+200 val_200 2008-04-08 11
+200 val_200 2008-04-08 12
+200 val_200 2008-04-08 12
+200 val_200 2008-04-09 11
+200 val_200 2008-04-09 11
+200 val_200 2008-04-09 12
+200 val_200 2008-04-09 12
+201 val_201 2008-04-08 11
+201 val_201 2008-04-08 12
+201 val_201 2008-04-09 11
+201 val_201 2008-04-09 12
+202 val_202 2008-04-08 11
+202 val_202 2008-04-08 12
+202 val_202 2008-04-09 11
+202 val_202 2008-04-09 12
+203 val_203 2008-04-08 11
+203 val_203 2008-04-08 11
+203 val_203 2008-04-08 12
+203 val_203 2008-04-08 12
+203 val_203 2008-04-09 11
+203 val_203 2008-04-09 11
+203 val_203 2008-04-09 12
+203 val_203 2008-04-09 12
+205 val_205 2008-04-08 11
+205 val_205 2008-04-08 11
+205 val_205 2008-04-08 12
+205 val_205 2008-04-08 12
+205 val_205 2008-04-09 11
+205 val_205 2008-04-09 11
+205 val_205 2008-04-09 12
+205 val_205 2008-04-09 12
+207 val_207 2008-04-08 11
+207 val_207 2008-04-08 11
+207 val_207 2008-04-08 12
+207 val_207 2008-04-08 12
+207 val_207 2008-04-09 11
+207 val_207 2008-04-09 11
+207 val_207 2008-04-09 12
+207 val_207 2008-04-09 12
+208 val_208 2008-04-08 11
+208 val_208 2008-04-08 11
+208 val_208 2008-04-08 11
+208 val_208 2008-04-08 12
+208 val_208 2008-04-08 12
+208 val_208 2008-04-08 12
+208 val_208 2008-04-09 11
+208 val_208 2008-04-09 11
+208 val_208 2008-04-09 11
+208 val_208 2008-04-09 12
+208 val_208 2008-04-09 12
+208 val_208 2008-04-09 12
+209 val_209 2008-04-08 11
+209 val_209 2008-04-08 11
+209 val_209 2008-04-08 12
+209 val_209 2008-04-08 12
+209 val_209 2008-04-09 11
+209 val_209 2008-04-09 11
+209 val_209 2008-04-09 12
+209 val_209 2008-04-09 12
+213 val_213 2008-04-08 11
+213 val_213 2008-04-08 11
+213 val_213 2008-04-08 12
+213 val_213 2008-04-08 12
+213 val_213 2008-04-09 11
+213 val_213 2008-04-09 11
+213 val_213 2008-04-09 12
+213 val_213 2008-04-09 12
+214 val_214 2008-04-08 11
+214 val_214 2008-04-08 12
+214 val_214 2008-04-09 11
+214 val_214 2008-04-09 12
+216 val_216 2008-04-08 11
+216 val_216 2008-04-08 11
+216 val_216 2008-04-08 12
+216 val_216 2008-04-08 12
+216 val_216 2008-04-09 11
+216 val_216 2008-04-09 11
+216 val_216 2008-04-09 12
+216 val_216 2008-04-09 12
+217 val_217 2008-04-08 11
+217 val_217 2008-04-08 11
+217 val_217 2008-04-08 12
+217 val_217 2008-04-08 12
+217 val_217 2008-04-09 11
+217 val_217 2008-04-09 11
+217 val_217 2008-04-09 12
+217 val_217 2008-04-09 12
+218 val_218 2008-04-08 11
+218 val_218 2008-04-08 12
+218 val_218 2008-04-09 11
+218 val_218 2008-04-09 12
+219 val_219 2008-04-08 11
+219 val_219 2008-04-08 11
+219 val_219 2008-04-08 12
+219 val_219 2008-04-08 12
+219 val_219 2008-04-09 11
+219 val_219 2008-04-09 11
+219 val_219 2008-04-09 12
+219 val_219 2008-04-09 12
+221 val_221 2008-04-08 11
+221 val_221 2008-04-08 11
+221 val_221 2008-04-08 12
+221 val_221 2008-04-08 12
+221 val_221 2008-04-09 11
+221 val_221 2008-04-09 11
+221 val_221 2008-04-09 12
+221 val_221 2008-04-09 12
+222 val_222 2008-04-08 11
+222 val_222 2008-04-08 12
+222 val_222 2008-04-09 11
+222 val_222 2008-04-09 12
+223 val_223 2008-04-08 11
+223 val_223 2008-04-08 11
+223 val_223 2008-04-08 12
+223 val_223 2008-04-08 12
+223 val_223 2008-04-09 11
+223 val_223 2008-04-09 11
+223 val_223 2008-04-09 12
+223 val_223 2008-04-09 12
+224 val_224 2008-04-08 11
+224 val_224 2008-04-08 11
+224 val_224 2008-04-08 12
+224 val_224 2008-04-08 12
+224 val_224 2008-04-09 11
+224 val_224 2008-04-09 11
+224 val_224 2008-04-09 12
+224 val_224 2008-04-09 12
+226 val_226 2008-04-08 11
+226 val_226 2008-04-08 12
+226 val_226 2008-04-09 11
+226 val_226 2008-04-09 12
+228 val_228 2008-04-08 11
+228 val_228 2008-04-08 12
+228 val_228 2008-04-09 11
+228 val_228 2008-04-09 12
+229 val_229 2008-04-08 11
+229 val_229 2008-04-08 11
+229 val_229 2008-04-08 12
+229 val_229 2008-04-08 12
+229 val_229 2008-04-09 11
+229 val_229 2008-04-09 11
+229 val_229 2008-04-09 12
+229 val_229 2008-04-09 12
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 11
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-04-08 12
+230 val_230 2008-04-09 11
+230 val_230 2008-04-09 11
+230 val_230 2008-04-09 11
+230 val_230 2008-04-09 11
+230 val_230 2008-04-09 11
+230 val_230 2008-04-09 12
+230 val_230 2008-04-09 12
+230 val_230 2008-04-09 12
+230 val_230 2008-04-09 12
+230 val_230 2008-04-09 12
+233 val_233 2008-04-08 11
+233 val_233 2008-04-08 11
+233 val_233 2008-04-08 12
+233 val_233 2008-04-08 12
+233 val_233 2008-04-09 11
+233 val_233 2008-04-09 11
+233 val_233 2008-04-09 12
+233 val_233 2008-04-09 12
+235 val_235 2008-04-08 11
+235 val_235 2008-04-08 12
+235 val_235 2008-04-09 11
+235 val_235 2008-04-09 12
+237 val_237 2008-04-08 11
+237 val_237 2008-04-08 11
+237 val_237 2008-04-08 12
+237 val_237 2008-04-08 12
+237 val_237 2008-04-09 11
+237 val_237 2008-04-09 11
+237 val_237 2008-04-09 12
+237 val_237 2008-04-09 12
+238 val_238 2008-04-08 11
+238 val_238 2008-04-08 11
+238 val_238 2008-04-08 12
+238 val_238 2008-04-08 12
+238 val_238 2008-04-09 11
+238 val_238 2008-04-09 11
+238 val_238 2008-04-09 12
+238 val_238 2008-04-09 12
+239 val_239 2008-04-08 11
+239 val_239 2008-04-08 11
+239 val_239 2008-04-08 12
+239 val_239 2008-04-08 12
+239 val_239 2008-04-09 11
+239 val_239 2008-04-09 11
+239 val_239 2008-04-09 12
+239 val_239 2008-04-09 12
+24 val_24 2008-04-08 11
+24 val_24 2008-04-08 11
+24 val_24 2008-04-08 12
+24 val_24 2008-04-08 12
+24 val_24 2008-04-09 11
+24 val_24 2008-04-09 11
+24 val_24 2008-04-09 12
+24 val_24 2008-04-09 12
+241 val_241 2008-04-08 11
+241 val_241 2008-04-08 12
+241 val_241 2008-04-09 11
+241 val_241 2008-04-09 12
+242 val_242 2008-04-08 11
+242 val_242 2008-04-08 11
+242 val_242 2008-04-08 12
+242 val_242 2008-04-08 12
+242 val_242 2008-04-09 11
+242 val_242 2008-04-09 11
+242 val_242 2008-04-09 12
+242 val_242 2008-04-09 12
+244 val_244 2008-04-08 11
+244 val_244 2008-04-08 12
+244 val_244 2008-04-09 11
+244 val_244 2008-04-09 12
+247 val_247 2008-04-08 11
+247 val_247 2008-04-08 12
+247 val_247 2008-04-09 11
+247 val_247 2008-04-09 12
+248 val_248 2008-04-08 11
+248 val_248 2008-04-08 12
+248 val_248 2008-04-09 11
+248 val_248 2008-04-09 12
+249 val_249 2008-04-08 11
+249 val_249 2008-04-08 12
+249 val_249 2008-04-09 11
+249 val_249 2008-04-09 12
+252 val_252 2008-04-08 11
+252 val_252 2008-04-08 12
+252 val_252 2008-04-09 11
+252 val_252 2008-04-09 12
+255 val_255 2008-04-08 11
+255 val_255 2008-04-08 11
+255 val_255 2008-04-08 12
+255 val_255 2008-04-08 12
+255 val_255 2008-04-09 11
+255 val_255 2008-04-09 11
+255 val_255 2008-04-09 12
+255 val_255 2008-04-09 12
+256 val_256 2008-04-08 11
+256 val_256 2008-04-08 11
+256 val_256 2008-04-08 12
+256 val_256 2008-04-08 12
+256 val_256 2008-04-09 11
+256 val_256 2008-04-09 11
+256 val_256 2008-04-09 12
+256 val_256 2008-04-09 12
+257 val_257 2008-04-08 11
+257 val_257 2008-04-08 12
+257 val_257 2008-04-09 11
+257 val_257 2008-04-09 12
+258 val_258 2008-04-08 11
+258 val_258 2008-04-08 12
+258 val_258 2008-04-09 11
+258 val_258 2008-04-09 12
+26 val_26 2008-04-08 11
+26 val_26 2008-04-08 11
+26 val_26 2008-04-08 12
+26 val_26 2008-04-08 12
+26 val_26 2008-04-09 11
+26 val_26 2008-04-09 11
+26 val_26 2008-04-09 12
+26 val_26 2008-04-09 12
+260 val_260 2008-04-08 11
+260 val_260 2008-04-08 12
+260 val_260 2008-04-09 11
+260 val_260 2008-04-09 12
+262 val_262 2008-04-08 11
+262 val_262 2008-04-08 12
+262 val_262 2008-04-09 11
+262 val_262 2008-04-09 12
+263 val_263 2008-04-08 11
+263 val_263 2008-04-08 12
+263 val_263 2008-04-09 11
+263 val_263 2008-04-09 12
+265 val_265 2008-04-08 11
+265 val_265 2008-04-08 11
+265 val_265 2008-04-08 12
+265 val_265 2008-04-08 12
+265 val_265 2008-04-09 11
+265 val_265 2008-04-09 11
+265 val_265 2008-04-09 12
+265 val_265 2008-04-09 12
+266 val_266 2008-04-08 11
+266 val_266 2008-04-08 12
+266 val_266 2008-04-09 11
+266 val_266 2008-04-09 12
+27 val_27 2008-04-08 11
+27 val_27 2008-04-08 12
+27 val_27 2008-04-09 11
+27 val_27 2008-04-09 12
+272 val_272 2008-04-08 11
+272 val_272 2008-04-08 11
+272 val_272 2008-04-08 12
+272 val_272 2008-04-08 12
+272 val_272 2008-04-09 11
+272 val_272 2008-04-09 11
+272 val_272 2008-04-09 12
+272 val_272 2008-04-09 12
+273 val_273 2008-04-08 11
+273 val_273 2008-04-08 11
+273 val_273 2008-04-08 11
+273 val_273 2008-04-08 12
+273 val_273 2008-04-08 12
+273 val_273 2008-04-08 12
+273 val_273 2008-04-09 11
+273 val_273 2008-04-09 11
+273 val_273 2008-04-09 11
+273 val_273 2008-04-09 12
+273 val_273 2008-04-09 12
+273 val_273 2008-04-09 12
+274 val_274 2008-04-08 11
+274 val_274 2008-04-08 12
+274 val_274 2008-04-09 11
+274 val_274 2008-04-09 12
+275 val_275 2008-04-08 11
+275 val_275 2008-04-08 12
+275 val_275 2008-04-09 11
+275 val_275 2008-04-09 12
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 11
+277 val_277 2008-04-08 12
+277 val_277 2008-04-08 12
+277 val_277 2008-04-08 12
+277 val_277 2008-04-08 12
+277 val_277 2008-04-09 11
+277 val_277 2008-04-09 11
+277 val_277 2008-04-09 11
+277 val_277 2008-04-09 11
+277 val_277 2008-04-09 12
+277 val_277 2008-04-09 12
+277 val_277 2008-04-09 12
+277 val_277 2008-04-09 12
+278 val_278 2008-04-08 11
+278 val_278 2008-04-08 11
+278 val_278 2008-04-08 12
+278 val_278 2008-04-08 12
+278 val_278 2008-04-09 11
+278 val_278 2008-04-09 11
+278 val_278 2008-04-09 12
+278 val_278 2008-04-09 12
+28 val_28 2008-04-08 11
+28 val_28 2008-04-08 12
+28 val_28 2008-04-09 11
+28 val_28 2008-04-09 12
+280 val_280 2008-04-08 11
+280 val_280 2008-04-08 11
+280 val_280 2008-04-08 12
+280 val_280 2008-04-08 12
+280 val_280 2008-04-09 11
+280 val_280 2008-04-09 11
+280 val_280 2008-04-09 12
+280 val_280 2008-04-09 12
+281 val_281 2008-04-08 11
+281 val_281 2008-04-08 11
+281 val_281 2008-04-08 12
+281 val_281 2008-04-08 12
+281 val_281 2008-04-09 11
+281 val_281 2008-04-09 11
+281 val_281 2008-04-09 12
+281 val_281 2008-04-09 12
+282 val_282 2008-04-08 11
+282 val_282 2008-04-08 11
+282 val_282 2008-04-08 12
+282 val_282 2008-04-08 12
+282 val_282 2008-04-09 11
+282 val_282 2008-04-09 11
+282 val_282 2008-04-09 12
+282 val_282 2008-04-09 12
+283 val_283 2008-04-08 11
+283 val_283 2008-04-08 12
+283 val_283 2008-04-09 11
+283 val_283 2008-04-09 12
+284 val_284 2008-04-08 11
+284 val_284 2008-04-08 12
+284 val_284 2008-04-09 11
+284 val_284 2008-04-09 12
+285 val_285 2008-04-08 11
+285 val_285 2008-04-08 12
+285 val_285 2008-04-09 11
+285 val_285 2008-04-09 12
+286 val_286 2008-04-08 11
+286 val_286 2008-04-08 12
+286 val_286 2008-04-09 11
+286 val_286 2008-04-09 12
+287 val_287 2008-04-08 11
+287 val_287 2008-04-08 12
+287 val_287 2008-04-09 11
+287 val_287 2008-04-09 12
+288 val_288 2008-04-08 11
+288 val_288 2008-04-08 11
+288 val_288 2008-04-08 12
+288 val_288 2008-04-08 12
+288 val_288 2008-04-09 11
+288 val_288 2008-04-09 11
+288 val_288 2008-04-09 12
+288 val_288 2008-04-09 12
+289 val_289 2008-04-08 11
+289 val_289 2008-04-08 12
+289 val_289 2008-04-09 11
+289 val_289 2008-04-09 12
+291 val_291 2008-04-08 11
+291 val_291 2008-04-08 12
+291 val_291 2008-04-09 11
+291 val_291 2008-04-09 12
+292 val_292 2008-04-08 11
+292 val_292 2008-04-08 12
+292 val_292 2008-04-09 11
+292 val_292 2008-04-09 12
+296 val_296 2008-04-08 11
+296 val_296 2008-04-08 12
+296 val_296 2008-04-09 11
+296 val_296 2008-04-09 12
+298 val_298 2008-04-08 11
+298 val_298 2008-04-08 11
+298 val_298 2008-04-08 11
+298 val_298 2008-04-08 12
+298 val_298 2008-04-08 12
+298 val_298 2008-04-08 12
+298 val_298 2008-04-09 11
+298 val_298 2008-04-09 11
+298 val_298 2008-04-09 11
+298 val_298 2008-04-09 12
+298 val_298 2008-04-09 12
+298 val_298 2008-04-09 12
+30 val_30 2008-04-08 11
+30 val_30 2008-04-08 12
+30 val_30 2008-04-09 11
+30 val_30 2008-04-09 12
+302 val_302 2008-04-08 11
+302 val_302 2008-04-08 12
+302 val_302 2008-04-09 11
+302 val_302 2008-04-09 12
+305 val_305 2008-04-08 11
+305 val_305 2008-04-08 12
+305 val_305 2008-04-09 11
+305 val_305 2008-04-09 12
+306 val_306 2008-04-08 11
+306 val_306 2008-04-08 12
+306 val_306 2008-04-09 11
+306 val_306 2008-04-09 12
+307 val_307 2008-04-08 11
+307 val_307 2008-04-08 11
+307 val_307 2008-04-08 12
+307 val_307 2008-04-08 12
+307 val_307 2008-04-09 11
+307 val_307 2008-04-09 11
+307 val_307 2008-04-09 12
+307 val_307 2008-04-09 12
+308 val_308 2008-04-08 11
+308 val_308 2008-04-08 12
+308 val_308 2008-04-09 11
+308 val_308 2008-04-09 12
+309 val_309 2008-04-08 11
+309 val_309 2008-04-08 11
+309 val_309 2008-04-08 12
+309 val_309 2008-04-08 12
+309 val_309 2008-04-09 11
+309 val_309 2008-04-09 11
+309 val_309 2008-04-09 12
+309 val_309 2008-04-09 12
+310 val_310 2008-04-08 11
+310 val_310 2008-04-08 12
+310 val_310 2008-04-09 11
+310 val_310 2008-04-09 12
+311 val_311 2008-04-08 11
+311 val_311 2008-04-08 11
+311 val_311 2008-04-08 11
+311 val_311 2008-04-08 12
+311 val_311 2008-04-08 12
+311 val_311 2008-04-08 12
+311 val_311 2008-04-09 11
+311 val_311 2008-04-09 11
+311 val_311 2008-04-09 11
+311 val_311 2008-04-09 12
+311 val_311 2008-04-09 12
+311 val_311 2008-04-09 12
+315 val_315 2008-04-08 11
+315 val_315 2008-04-08 12
+315 val_315 2008-04-09 11
+315 val_315 2008-04-09 12
+316 val_316 2008-04-08 11
+316 val_316 2008-04-08 11
+316 val_316 2008-04-08 11
+316 val_316 2008-04-08 12
+316 val_316 2008-04-08 12
+316 val_316 2008-04-08 12
+316 val_316 2008-04-09 11
+316 val_316 2008-04-09 11
+316 val_316 2008-04-09 11
+316 val_316 2008-04-09 12
+316 val_316 2008-04-09 12
+316 val_316 2008-04-09 12
+317 val_317 2008-04-08 11
+317 val_317 2008-04-08 11
+317 val_317 2008-04-08 12
+317 val_317 2008-04-08 12
+317 val_317 2008-04-09 11
+317 val_317 2008-04-09 11
+317 val_317 2008-04-09 12
+317 val_317 2008-04-09 12
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 11
+318 val_318 2008-04-08 12
+318 val_318 2008-04-08 12
+318 val_318 2008-04-08 12
+318 val_318 2008-04-09 11
+318 val_318 2008-04-09 11
+318 val_318 2008-04-09 11
+318 val_318 2008-04-09 12
+318 val_318 2008-04-09 12
+318 val_318 2008-04-09 12
+321 val_321 2008-04-08 11
+321 val_321 2008-04-08 11
+321 val_321 2008-04-08 12
+321 val_321 2008-04-08 12
+321 val_321 2008-04-09 11
+321 val_321 2008-04-09 11
+321 val_321 2008-04-09 12
+321 val_321 2008-04-09 12
+322 val_322 2008-04-08 11
+322 val_322 2008-04-08 11
+322 val_322 2008-04-08 12
+322 val_322 2008-04-08 12
+322 val_322 2008-04-09 11
+322 val_322 2008-04-09 11
+322 val_322 2008-04-09 12
+322 val_322 2008-04-09 12
+323 val_323 2008-04-08 11
+323 val_323 2008-04-08 12
+323 val_323 2008-04-09 11
+323 val_323 2008-04-09 12
+325 val_325 2008-04-08 11
+325 val_325 2008-04-08 11
+325 val_325 2008-04-08 12
+325 val_325 2008-04-08 12
+325 val_325 2008-04-09 11
+325 val_325 2008-04-09 11
+325 val_325 2008-04-09 12
+325 val_325 2008-04-09 12
+327 val_327 2008-04-08 11
+327 val_327 2008-04-08 11
+327 val_327 2008-04-08 11
+327 val_327 2008-04-08 12
+327 val_327 2008-04-08 12
+327 val_327 2008-04-08 12
+327 val_327 2008-04-09 11
+327 val_327 2008-04-09 11
+327 val_327 2008-04-09 11
+327 val_327 2008-04-09 12
+327 val_327 2008-04-09 12
+327 val_327 2008-04-09 12
+33 val_33 2008-04-08 11
+33 val_33 2008-04-08 12
+33 val_33 2008-04-09 11
+33 val_33 2008-04-09 12
+331 val_331 2008-04-08 11
+331 val_331 2008-04-08 11
+331 val_331 2008-04-08 12
+331 val_331 2008-04-08 12
+331 val_331 2008-04-09 11
+331 val_331 2008-04-09 11
+331 val_331 2008-04-09 12
+331 val_331 2008-04-09 12
+332 val_332 2008-04-08 11
+332 val_332 2008-04-08 12
+332 val_332 2008-04-09 11
+332 val_332 2008-04-09 12
+333 val_333 2008-04-08 11
+333 val_333 2008-04-08 11
+333 val_333 2008-04-08 12
+333 val_333 2008-04-08 12
+333 val_333 2008-04-09 11
+333 val_333 2008-04-09 11
+333 val_333 2008-04-09 12
+333 val_333 2008-04-09 12
+335 val_335 2008-04-08 11
+335 val_335 2008-04-08 12
+335 val_335 2008-04-09 11
+335 val_335 2008-04-09 12
+336 val_336 2008-04-08 11
+336 val_336 2008-04-08 12
+336 val_336 2008-04-09 11
+336 val_336 2008-04-09 12
+338 val_338 2008-04-08 11
+338 val_338 2008-04-08 12
+338 val_338 2008-04-09 11
+338 val_338 2008-04-09 12
+339 val_339 2008-04-08 11
+339 val_339 2008-04-08 12
+339 val_339 2008-04-09 11
+339 val_339 2008-04-09 12
+34 val_34 2008-04-08 11
+34 val_34 2008-04-08 12
+34 val_34 2008-04-09 11
+34 val_34 2008-04-09 12
+341 val_341 2008-04-08 11
+341 val_341 2008-04-08 12
+341 val_341 2008-04-09 11
+341 val_341 2008-04-09 12
+342 val_342 2008-04-08 11
+342 val_342 2008-04-08 11
+342 val_342 2008-04-08 12
+342 val_342 2008-04-08 12
+342 val_342 2008-04-09 11
+342 val_342 2008-04-09 11
+342 val_342 2008-04-09 12
+342 val_342 2008-04-09 12
+344 val_344 2008-04-08 11
+344 val_344 2008-04-08 11
+344 val_344 2008-04-08 12
+344 val_344 2008-04-08 12
+344 val_344 2008-04-09 11
+344 val_344 2008-04-09 11
+344 val_344 2008-04-09 12
+344 val_344 2008-04-09 12
+345 val_345 2008-04-08 11
+345 val_345 2008-04-08 12
+345 val_345 2008-04-09 11
+345 val_345 2008-04-09 12
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 11
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-04-08 12
+348 val_348 2008-04-09 11
+348 val_348 2008-04-09 11
+348 val_348 2008-04-09 11
+348 val_348 2008-04-09 11
+348 val_348 2008-04-09 11
+348 val_348 2008-04-09 12
+348 val_348 2008-04-09 12
+348 val_348 2008-04-09 12
+348 val_348 2008-04-09 12
+348 val_348 2008-04-09 12
+35 val_35 2008-04-08 11
+35 val_35 2008-04-08 11
+35 val_35 2008-04-08 11
+35 val_35 2008-04-08 12
+35 val_35 2008-04-08 12
+35 val_35 2008-04-08 12
+35 val_35 2008-04-09 11
+35 val_35 2008-04-09 11
+35 val_35 2008-04-09 11
+35 val_35 2008-04-09 12
+35 val_35 2008-04-09 12
+35 val_35 2008-04-09 12
+351 val_351 2008-04-08 11
+351 val_351 2008-04-08 12
+351 val_351 2008-04-09 11
+351 val_351 2008-04-09 12
+353 val_353 2008-04-08 11
+353 val_353 2008-04-08 11
+353 val_353 2008-04-08 12
+353 val_353 2008-04-08 12
+353 val_353 2008-04-09 11
+353 val_353 2008-04-09 11
+353 val_353 2008-04-09 12
+353 val_353 2008-04-09 12
+356 val_356 2008-04-08 11
+356 val_356 2008-04-08 12
+356 val_356 2008-04-09 11
+356 val_356 2008-04-09 12
+360 val_360 2008-04-08 11
+360 val_360 2008-04-08 12
+360 val_360 2008-04-09 11
+360 val_360 2008-04-09 12
+362 val_362 2008-04-08 11
+362 val_362 2008-04-08 12
+362 val_362 2008-04-09 11
+362 val_362 2008-04-09 12
+364 val_364 2008-04-08 11
+364 val_364 2008-04-08 12
+364 val_364 2008-04-09 11
+364 val_364 2008-04-09 12
+365 val_365 2008-04-08 11
+365 val_365 2008-04-08 12
+365 val_365 2008-04-09 11
+365 val_365 2008-04-09 12
+366 val_366 2008-04-08 11
+366 val_366 2008-04-08 12
+366 val_366 2008-04-09 11
+366 val_366 2008-04-09 12
+367 val_367 2008-04-08 11
+367 val_367 2008-04-08 11
+367 val_367 2008-04-08 12
+367 val_367 2008-04-08 12
+367 val_367 2008-04-09 11
+367 val_367 2008-04-09 11
+367 val_367 2008-04-09 12
+367 val_367 2008-04-09 12
+368 val_368 2008-04-08 11
+368 val_368 2008-04-08 12
+368 val_368 2008-04-09 11
+368 val_368 2008-04-09 12
+369 val_369 2008-04-08 11
+369 val_369 2008-04-08 11
+369 val_369 2008-04-08 11
+369 val_369 2008-04-08 12
+369 val_369 2008-04-08 12
+369 val_369 2008-04-08 12
+369 val_369 2008-04-09 11
+369 val_369 2008-04-09 11
+369 val_369 2008-04-09 11
+369 val_369 2008-04-09 12
+369 val_369 2008-04-09 12
+369 val_369 2008-04-09 12
+37 val_37 2008-04-08 11
+37 val_37 2008-04-08 11
+37 val_37 2008-04-08 12
+37 val_37 2008-04-08 12
+37 val_37 2008-04-09 11
+37 val_37 2008-04-09 11
+37 val_37 2008-04-09 12
+37 val_37 2008-04-09 12
+373 val_373 2008-04-08 11
+373 val_373 2008-04-08 12
+373 val_373 2008-04-09 11
+373 val_373 2008-04-09 12
+374 val_374 2008-04-08 11
+374 val_374 2008-04-08 12
+374 val_374 2008-04-09 11
+374 val_374 2008-04-09 12
+375 val_375 2008-04-08 11
+375 val_375 2008-04-08 12
+375 val_375 2008-04-09 11
+375 val_375 2008-04-09 12
+377 val_377 2008-04-08 11
+377 val_377 2008-04-08 12
+377 val_377 2008-04-09 11
+377 val_377 2008-04-09 12
+378 val_378 2008-04-08 11
+378 val_378 2008-04-08 12
+378 val_378 2008-04-09 11
+378 val_378 2008-04-09 12
+379 val_379 2008-04-08 11
+379 val_379 2008-04-08 12
+379 val_379 2008-04-09 11
+379 val_379 2008-04-09 12
+382 val_382 2008-04-08 11
+382 val_382 2008-04-08 11
+382 val_382 2008-04-08 12
+382 val_382 2008-04-08 12
+382 val_382 2008-04-09 11
+382 val_382 2008-04-09 11
+382 val_382 2008-04-09 12
+382 val_382 2008-04-09 12
+384 val_384 2008-04-08 11
+384 val_384 2008-04-08 11
+384 val_384 2008-04-08 11
+384 val_384 2008-04-08 12
+384 val_384 2008-04-08 12
+384 val_384 2008-04-08 12
+384 val_384 2008-04-09 11
+384 val_384 2008-04-09 11
+384 val_384 2008-04-09 11
+384 val_384 2008-04-09 12
+384 val_384 2008-04-09 12
+384 val_384 2008-04-09 12
+386 val_386 2008-04-08 11
+386 val_386 2008-04-08 12
+386 val_386 2008-04-09 11
+386 val_386 2008-04-09 12
+389 val_389 2008-04-08 11
+389 val_389 2008-04-08 12
+389 val_389 2008-04-09 11
+389 val_389 2008-04-09 12
+392 val_392 2008-04-08 11
+392 val_392 2008-04-08 12
+392 val_392 2008-04-09 11
+392 val_392 2008-04-09 12
+393 val_393 2008-04-08 11
+393 val_393 2008-04-08 12
+393 val_393 2008-04-09 11
+393 val_393 2008-04-09 12
+394 val_394 2008-04-08 11
+394 val_394 2008-04-08 12
+394 val_394 2008-04-09 11
+394 val_394 2008-04-09 12
+395 val_395 2008-04-08 11
+395 val_395 2008-04-08 11
+395 val_395 2008-04-08 12
+395 val_395 2008-04-08 12
+395 val_395 2008-04-09 11
+395 val_395 2008-04-09 11
+395 val_395 2008-04-09 12
+395 val_395 2008-04-09 12
+396 val_396 2008-04-08 11
+396 val_396 2008-04-08 11
+396 val_396 2008-04-08 11
+396 val_396 2008-04-08 12
+396 val_396 2008-04-08 12
+396 val_396 2008-04-08 12
+396 val_396 2008-04-09 11
+396 val_396 2008-04-09 11
+396 val_396 2008-04-09 11
+396 val_396 2008-04-09 12
+396 val_396 2008-04-09 12
+396 val_396 2008-04-09 12
+397 val_397 2008-04-08 11
+397 val_397 2008-04-08 11
+397 val_397 2008-04-08 12
+397 val_397 2008-04-08 12
+397 val_397 2008-04-09 11
+397 val_397 2008-04-09 11
+397 val_397 2008-04-09 12
+397 val_397 2008-04-09 12
+399 val_399 2008-04-08 11
+399 val_399 2008-04-08 11
+399 val_399 2008-04-08 12
+399 val_399 2008-04-08 12
+399 val_399 2008-04-09 11
+399 val_399 2008-04-09 11
+399 val_399 2008-04-09 12
+399 val_399 2008-04-09 12
+4 val_4 2008-04-08 11
+4 val_4 2008-04-08 12
+4 val_4 2008-04-09 11
+4 val_4 2008-04-09 12
+400 val_400 2008-04-08 11
+400 val_400 2008-04-08 12
+400 val_400 2008-04-09 11
+400 val_400 2008-04-09 12
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 11
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-04-08 12
+401 val_401 2008-04-09 11
+401 val_401 2008-04-09 11
+401 val_401 2008-04-09 11
+401 val_401 2008-04-09 11
+401 val_401 2008-04-09 11
+401 val_401 2008-04-09 12
+401 val_401 2008-04-09 12
+401 val_401 2008-04-09 12
+401 val_401 2008-04-09 12
+401 val_401 2008-04-09 12
+402 val_402 2008-04-08 11
+402 val_402 2008-04-08 12
+402 val_402 2008-04-09 11
+402 val_402 2008-04-09 12
+403 val_403 2008-04-08 11
+403 val_403 2008-04-08 11
+403 val_403 2008-04-08 11
+403 val_403 2008-04-08 12
+403 val_403 2008-04-08 12
+403 val_403 2008-04-08 12
+403 val_403 2008-04-09 11
+403 val_403 2008-04-09 11
+403 val_403 2008-04-09 11
+403 val_403 2008-04-09 12
+403 val_403 2008-04-09 12
+403 val_403 2008-04-09 12
+404 val_404 2008-04-08 11
+404 val_404 2008-04-08 11
+404 val_404 2008-04-08 12
+404 val_404 2008-04-08 12
+404 val_404 2008-04-09 11
+404 val_404 2008-04-09 11
+404 val_404 2008-04-09 12
+404 val_404 2008-04-09 12
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 11
+406 val_406 2008-04-08 12
+406 val_406 2008-04-08 12
+406 val_406 2008-04-08 12
+406 val_406 2008-04-08 12
+406 val_406 2008-04-09 11
+406 val_406 2008-04-09 11
+406 val_406 2008-04-09 11
+406 val_406 2008-04-09 11
+406 val_406 2008-04-09 12
+406 val_406 2008-04-09 12
+406 val_406 2008-04-09 12
+406 val_406 2008-04-09 12
+407 val_407 2008-04-08 11
+407 val_407 2008-04-08 12
+407 val_407 2008-04-09 11
+407 val_407 2008-04-09 12
+409 val_409 2008-04-08 11
+409 val_409 2008-04-08 11
+409 val_409 2008-04-08 11
+409 val_409 2008-04-08 12
+409 val_409 2008-04-08 12
+409 val_409 2008-04-08 12
+409 val_409 2008-04-09 11
+409 val_409 2008-04-09 11
+409 val_409 2008-04-09 11
+409 val_409 2008-04-09 12
+409 val_409 2008-04-09 12
+409 val_409 2008-04-09 12
+41 val_41 2008-04-08 11
+41 val_41 2008-04-08 12
+41 val_41 2008-04-09 11
+41 val_41 2008-04-09 12
+411 val_411 2008-04-08 11
+411 val_411 2008-04-08 12
+411 val_411 2008-04-09 11
+411 val_411 2008-04-09 12
+413 val_413 2008-04-08 11
+413 val_413 2008-04-08 11
+413 val_413 2008-04-08 12
+413 val_413 2008-04-08 12
+413 val_413 2008-04-09 11
+413 val_413 2008-04-09 11
+413 val_413 2008-04-09 12
+413 val_413 2008-04-09 12
+414 val_414 2008-04-08 11
+414 val_414 2008-04-08 11
+414 val_414 2008-04-08 12
+414 val_414 2008-04-08 12
+414 val_414 2008-04-09 11
+414 val_414 2008-04-09 11
+414 val_414 2008-04-09 12
+414 val_414 2008-04-09 12
+417 val_417 2008-04-08 11
+417 val_417 2008-04-08 11
+417 val_417 2008-04-08 11
+417 val_417 2008-04-08 12
+417 val_417 2008-04-08 12
+417 val_417 2008-04-08 12
+417 val_417 2008-04-09 11
+417 val_417 2008-04-09 11
+417 val_417 2008-04-09 11
+417 val_417 2008-04-09 12
+417 val_417 2008-04-09 12
+417 val_417 2008-04-09 12
+418 val_418 2008-04-08 11
+418 val_418 2008-04-08 12
+418 val_418 2008-04-09 11
+418 val_418 2008-04-09 12
+419 val_419 2008-04-08 11
+419 val_419 2008-04-08 12
+419 val_419 2008-04-09 11
+419 val_419 2008-04-09 12
+42 val_42 2008-04-08 11
+42 val_42 2008-04-08 11
+42 val_42 2008-04-08 12
+42 val_42 2008-04-08 12
+42 val_42 2008-04-09 11
+42 val_42 2008-04-09 11
+42 val_42 2008-04-09 12
+42 val_42 2008-04-09 12
+421 val_421 2008-04-08 11
+421 val_421 2008-04-08 12
+421 val_421 2008-04-09 11
+421 val_421 2008-04-09 12
+424 val_424 2008-04-08 11
+424 val_424 2008-04-08 11
+424 val_424 2008-04-08 12
+424 val_424 2008-04-08 12
+424 val_424 2008-04-09 11
+424 val_424 2008-04-09 11
+424 val_424 2008-04-09 12
+424 val_424 2008-04-09 12
+427 val_427 2008-04-08 11
+427 val_427 2008-04-08 12
+427 val_427 2008-04-09 11
+427 val_427 2008-04-09 12
+429 val_429 2008-04-08 11
+429 val_429 2008-04-08 11
+429 val_429 2008-04-08 12
+429 val_429 2008-04-08 12
+429 val_429 2008-04-09 11
+429 val_429 2008-04-09 11
+429 val_429 2008-04-09 12
+429 val_429 2008-04-09 12
+43 val_43 2008-04-08 11
+43 val_43 2008-04-08 12
+43 val_43 2008-04-09 11
+43 val_43 2008-04-09 12
+430 val_430 2008-04-08 11
+430 val_430 2008-04-08 11
+430 val_430 2008-04-08 11
+430 val_430 2008-04-08 12
+430 val_430 2008-04-08 12
+430 val_430 2008-04-08 12
+430 val_430 2008-04-09 11
+430 val_430 2008-04-09 11
+430 val_430 2008-04-09 11
+430 val_430 2008-04-09 12
+430 val_430 2008-04-09 12
+430 val_430 2008-04-09 12
+431 val_431 2008-04-08 11
+431 val_431 2008-04-08 11
+431 val_431 2008-04-08 11
+431 val_431 2008-04-08 12
+431 val_431 2008-04-08 12
+431 val_431 2008-04-08 12
+431 val_431 2008-04-09 11
+431 val_431 2008-04-09 11
+431 val_431 2008-04-09 11
+431 val_431 2008-04-09 12
+431 val_431 2008-04-09 12
+431 val_431 2008-04-09 12
+432 val_432 2008-04-08 11
+432 val_432 2008-04-08 12
+432 val_432 2008-04-09 11
+432 val_432 2008-04-09 12
+435 val_435 2008-04-08 11
+435 val_435 2008-04-08 12
+435 val_435 2008-04-09 11
+435 val_435 2008-04-09 12
+436 val_436 2008-04-08 11
+436 val_436 2008-04-08 12
+436 val_436 2008-04-09 11
+436 val_436 2008-04-09 12
+437 val_437 2008-04-08 11
+437 val_437 2008-04-08 12
+437 val_437 2008-04-09 11
+437 val_437 2008-04-09 12
+438 val_438 2008-04-08 11
+438 val_438 2008-04-08 11
+438 val_438 2008-04-08 11
+438 val_438 2008-04-08 12
+438 val_438 2008-04-08 12
+438 val_438 2008-04-08 12
+438 val_438 2008-04-09 11
+438 val_438 2008-04-09 11
+438 val_438 2008-04-09 11
+438 val_438 2008-04-09 12
+438 val_438 2008-04-09 12
+438 val_438 2008-04-09 12
+439 val_439 2008-04-08 11
+439 val_439 2008-04-08 11
+439 val_439 2008-04-08 12
+439 val_439 2008-04-08 12
+439 val_439 2008-04-09 11
+439 val_439 2008-04-09 11
+439 val_439 2008-04-09 12
+439 val_439 2008-04-09 12
+44 val_44 2008-04-08 11
+44 val_44 2008-04-08 12
+44 val_44 2008-04-09 11
+44 val_44 2008-04-09 12
+443 val_443 2008-04-08 11
+443 val_443 2008-04-08 12
+443 val_443 2008-04-09 11
+443 val_443 2008-04-09 12
+444 val_444 2008-04-08 11
+444 val_444 2008-04-08 12
+444 val_444 2008-04-09 11
+444 val_444 2008-04-09 12
+446 val_446 2008-04-08 11
+446 val_446 2008-04-08 12
+446 val_446 2008-04-09 11
+446 val_446 2008-04-09 12
+448 val_448 2008-04-08 11
+448 val_448 2008-04-08 12
+448 val_448 2008-04-09 11
+448 val_448 2008-04-09 12
+449 val_449 2008-04-08 11
+449 val_449 2008-04-08 12
+449 val_449 2008-04-09 11
+449 val_449 2008-04-09 12
+452 val_452 2008-04-08 11
+452 val_452 2008-04-08 12
+452 val_452 2008-04-09 11
+452 val_452 2008-04-09 12
+453 val_453 2008-04-08 11
+453 val_453 2008-04-08 12
+453 val_453 2008-04-09 11
+453 val_453 2008-04-09 12
+454 val_454 2008-04-08 11
+454 val_454 2008-04-08 11
+454 val_454 2008-04-08 11
+454 val_454 2008-04-08 12
+454 val_454 2008-04-08 12
+454 val_454 2008-04-08 12
+454 val_454 2008-04-09 11
+454 val_454 2008-04-09 11
+454 val_454 2008-04-09 11
+454 val_454 2008-04-09 12
+454 val_454 2008-04-09 12
+454 val_454 2008-04-09 12
+455 val_455 2008-04-08 11
+455 val_455 2008-04-08 12
+455 val_455 2008-04-09 11
+455 val_455 2008-04-09 12
+457 val_457 2008-04-08 11
+457 val_457 2008-04-08 12
+457 val_457 2008-04-09 11
+457 val_457 2008-04-09 12
+458 val_458 2008-04-08 11
+458 val_458 2008-04-08 11
+458 val_458 2008-04-08 12
+458 val_458 2008-04-08 12
+458 val_458 2008-04-09 11
+458 val_458 2008-04-09 11
+458 val_458 2008-04-09 12
+458 val_458 2008-04-09 12
+459 val_459 2008-04-08 11
+459 val_459 2008-04-08 11
+459 val_459 2008-04-08 12
+459 val_459 2008-04-08 12
+459 val_459 2008-04-09 11
+459 val_459 2008-04-09 11
+459 val_459 2008-04-09 12
+459 val_459 2008-04-09 12
+460 val_460 2008-04-08 11
+460 val_460 2008-04-08 12
+460 val_460 2008-04-09 11
+460 val_460 2008-04-09 12
+462 val_462 2008-04-08 11
+462 val_462 2008-04-08 11
+462 val_462 2008-04-08 12
+462 val_462 2008-04-08 12
+462 val_462 2008-04-09 11
+462 val_462 2008-04-09 11
+462 val_462 2008-04-09 12
+462 val_462 2008-04-09 12
+463 val_463 2008-04-08 11
+463 val_463 2008-04-08 11
+463 val_463 2008-04-08 12
+463 val_463 2008-04-08 12
+463 val_463 2008-04-09 11
+463 val_463 2008-04-09 11
+463 val_463 2008-04-09 12
+463 val_463 2008-04-09 12
+466 val_466 2008-04-08 11
+466 val_466 2008-04-08 11
+466 val_466 2008-04-08 11
+466 val_466 2008-04-08 12
+466 val_466 2008-04-08 12
+466 val_466 2008-04-08 12
+466 val_466 2008-04-09 11
+466 val_466 2008-04-09 11
+466 val_466 2008-04-09 11
+466 val_466 2008-04-09 12
+466 val_466 2008-04-09 12
+466 val_466 2008-04-09 12
+467 val_467 2008-04-08 11
+467 val_467 2008-04-08 12
+467 val_467 2008-04-09 11
+467 val_467 2008-04-09 12
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 11
+468 val_468 2008-04-08 12
+468 val_468 2008-04-08 12
+468 val_468 2008-04-08 12
+468 val_468 2008-04-08 12
+468 val_468 2008-04-09 11
+468 val_468 2008-04-09 11
+468 val_468 2008-04-09 11
+468 val_468 2008-04-09 11
+468 val_468 2008-04-09 12
+468 val_468 2008-04-09 12
+468 val_468 2008-04-09 12
+468 val_468 2008-04-09 12
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 11
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-04-08 12
+469 val_469 2008-04-09 11
+469 val_469 2008-04-09 11
+469 val_469 2008-04-09 11
+469 val_469 2008-04-09 11
+469 val_469 2008-04-09 11
+469 val_469 2008-04-09 12
+469 val_469 2008-04-09 12
+469 val_469 2008-04-09 12
+469 val_469 2008-04-09 12
+469 val_469 2008-04-09 12
+47 val_47 2008-04-08 11
+47 val_47 2008-04-08 12
+47 val_47 2008-04-09 11
+47 val_47 2008-04-09 12
+470 val_470 2008-04-08 11
+470 val_470 2008-04-08 12
+470 val_470 2008-04-09 11
+470 val_470 2008-04-09 12
+472 val_472 2008-04-08 11
+472 val_472 2008-04-08 12
+472 val_472 2008-04-09 11
+472 val_472 2008-04-09 12
+475 val_475 2008-04-08 11
+475 val_475 2008-04-08 12
+475 val_475 2008-04-09 11
+475 val_475 2008-04-09 12
+477 val_477 2008-04-08 11
+477 val_477 2008-04-08 12
+477 val_477 2008-04-09 11
+477 val_477 2008-04-09 12
+478 val_478 2008-04-08 11
+478 val_478 2008-04-08 11
+478 val_478 2008-04-08 12
+478 val_478 2008-04-08 12
+478 val_478 2008-04-09 11
+478 val_478 2008-04-09 11
+478 val_478 2008-04-09 12
+478 val_478 2008-04-09 12
+479 val_479 2008-04-08 11
+479 val_479 2008-04-08 12
+479 val_479 2008-04-09 11
+479 val_479 2008-04-09 12
+480 val_480 2008-04-08 11
+480 val_480 2008-04-08 11
+480 val_480 2008-04-08 11
+480 val_480 2008-04-08 12
+480 val_480 2008-04-08 12
+480 val_480 2008-04-08 12
+480 val_480 2008-04-09 11
+480 val_480 2008-04-09 11
+480 val_480 2008-04-09 11
+480 val_480 2008-04-09 12
+480 val_480 2008-04-09 12
+480 val_480 2008-04-09 12
+481 val_481 2008-04-08 11
+481 val_481 2008-04-08 12
+481 val_481 2008-04-09 11
+481 val_481 2008-04-09 12
+482 val_482 2008-04-08 11
+482 val_482 2008-04-08 12
+482 val_482 2008-04-09 11
+482 val_482 2008-04-09 12
+483 val_483 2008-04-08 11
+483 val_483 2008-04-08 12
+483 val_483 2008-04-09 11
+483 val_483 2008-04-09 12
+484 val_484 2008-04-08 11
+484 val_484 2008-04-08 12
+484 val_484 2008-04-09 11
+484 val_484 2008-04-09 12
+485 val_485 2008-04-08 11
+485 val_485 2008-04-08 12
+485 val_485 2008-04-09 11
+485 val_485 2008-04-09 12
+487 val_487 2008-04-08 11
+487 val_487 2008-04-08 12
+487 val_487 2008-04-09 11
+487 val_487 2008-04-09 12
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 11
+489 val_489 2008-04-08 12
+489 val_489 2008-04-08 12
+489 val_489 2008-04-08 12
+489 val_489 2008-04-08 12
+489 val_489 2008-04-09 11
+489 val_489 2008-04-09 11
+489 val_489 2008-04-09 11
+489 val_489 2008-04-09 11
+489 val_489 2008-04-09 12
+489 val_489 2008-04-09 12
+489 val_489 2008-04-09 12
+489 val_489 2008-04-09 12
+490 val_490 2008-04-08 11
+490 val_490 2008-04-08 12
+490 val_490 2008-04-09 11
+490 val_490 2008-04-09 12
+491 val_491 2008-04-08 11
+491 val_491 2008-04-08 12
+491 val_491 2008-04-09 11
+491 val_491 2008-04-09 12
+492 val_492 2008-04-08 11
+492 val_492 2008-04-08 11
+492 val_492 2008-04-08 12
+492 val_492 2008-04-08 12
+492 val_492 2008-04-09 11
+492 val_492 2008-04-09 11
+492 val_492 2008-04-09 12
+492 val_492 2008-04-09 12
+493 val_493 2008-04-08 11
+493 val_493 2008-04-08 12
+493 val_493 2008-04-09 11
+493 val_493 2008-04-09 12
+494 val_494 2008-04-08 11
+494 val_494 2008-04-08 12
+494 val_494 2008-04-09 11
+494 val_494 2008-04-09 12
+495 val_495 2008-04-08 11
+495 val_495 2008-04-08 12
+495 val_495 2008-04-09 11
+495 val_495 2008-04-09 12
+496 val_496 2008-04-08 11
+496 val_496 2008-04-08 12
+496 val_496 2008-04-09 11
+496 val_496 2008-04-09 12
+497 val_497 2008-04-08 11
+497 val_497 2008-04-08 12
+497 val_497 2008-04-09 11
+497 val_497 2008-04-09 12
+498 val_498 2008-04-08 11
+498 val_498 2008-04-08 11
+498 val_498 2008-04-08 11
+498 val_498 2008-04-08 12
+498 val_498 2008-04-08 12
+498 val_498 2008-04-08 12
+498 val_498 2008-04-09 11
+498 val_498 2008-04-09 11
+498 val_498 2008-04-09 11
+498 val_498 2008-04-09 12
+498 val_498 2008-04-09 12
+498 val_498 2008-04-09 12
+5 val_5 2008-04-08 11
+5 val_5 2008-04-08 11
+5 val_5 2008-04-08 11
+5 val_5 2008-04-08 12
+5 val_5 2008-04-08 12
+5 val_5 2008-04-08 12
+5 val_5 2008-04-09 11
+5 val_5 2008-04-09 11
+5 val_5 2008-04-09 11
+5 val_5 2008-04-09 12
+5 val_5 2008-04-09 12
+5 val_5 2008-04-09 12
+51 val_51 2008-04-08 11
+51 val_51 2008-04-08 11
+51 val_51 2008-04-08 12
+51 val_51 2008-04-08 12
+51 val_51 2008-04-09 11
+51 val_51 2008-04-09 11
+51 val_51 2008-04-09 12
+51 val_51 2008-04-09 12
+53 val_53 2008-04-08 11
+53 val_53 2008-04-08 12
+53 val_53 2008-04-09 11
+53 val_53 2008-04-09 12
+54 val_54 2008-04-08 11
+54 val_54 2008-04-08 12
+54 val_54 2008-04-09 11
+54 val_54 2008-04-09 12
+57 val_57 2008-04-08 11
+57 val_57 2008-04-08 12
+57 val_57 2008-04-09 11
+57 val_57 2008-04-09 12
+58 val_58 2008-04-08 11
+58 val_58 2008-04-08 11
+58 val_58 2008-04-08 12
+58 val_58 2008-04-08 12
+58 val_58 2008-04-09 11
+58 val_58 2008-04-09 11
+58 val_58 2008-04-09 12
+58 val_58 2008-04-09 12
+64 val_64 2008-04-08 11
+64 val_64 2008-04-08 12
+64 val_64 2008-04-09 11
+64 val_64 2008-04-09 12
+65 val_65 2008-04-08 11
+65 val_65 2008-04-08 12
+65 val_65 2008-04-09 11
+65 val_65 2008-04-09 12
+66 val_66 2008-04-08 11
+66 val_66 2008-04-08 12
+66 val_66 2008-04-09 11
+66 val_66 2008-04-09 12
+67 val_67 2008-04-08 11
+67 val_67 2008-04-08 11
+67 val_67 2008-04-08 12
+67 val_67 2008-04-08 12
+67 val_67 2008-04-09 11
+67 val_67 2008-04-09 11
+67 val_67 2008-04-09 12
+67 val_67 2008-04-09 12
+69 val_69 2008-04-08 11
+69 val_69 2008-04-08 12
+69 val_69 2008-04-09 11
+69 val_69 2008-04-09 12
+70 val_70 2008-04-08 11
+70 val_70 2008-04-08 11
+70 val_70 2008-04-08 11
+70 val_70 2008-04-08 12
+70 val_70 2008-04-08 12
+70 val_70 2008-04-08 12
+70 val_70 2008-04-09 11
+70 val_70 2008-04-09 11
+70 val_70 2008-04-09 11
+70 val_70 2008-04-09 12
+70 val_70 2008-04-09 12
+70 val_70 2008-04-09 12
+72 val_72 2008-04-08 11
+72 val_72 2008-04-08 11
+72 val_72 2008-04-08 12
+72 val_72 2008-04-08 12
+72 val_72 2008-04-09 11
+72 val_72 2008-04-09 11
+72 val_72 2008-04-09 12
+72 val_72 2008-04-09 12
+74 val_74 2008-04-08 11
+74 val_74 2008-04-08 12
+74 val_74 2008-04-09 11
+74 val_74 2008-04-09 12
+76 val_76 2008-04-08 11
+76 val_76 2008-04-08 11
+76 val_76 2008-04-08 12
+76 val_76 2008-04-08 12
+76 val_76 2008-04-09 11
+76 val_76 2008-04-09 11
+76 val_76 2008-04-09 12
+76 val_76 2008-04-09 12
+77 val_77 2008-04-08 11
+77 val_77 2008-04-08 12
+77 val_77 2008-04-09 11
+77 val_77 2008-04-09 12
+78 val_78 2008-04-08 11
+78 val_78 2008-04-08 12
+78 val_78 2008-04-09 11
+78 val_78 2008-04-09 12
+8 val_8 2008-04-08 11
+8 val_8 2008-04-08 12
+8 val_8 2008-04-09 11
+8 val_8 2008-04-09 12
+80 val_80 2008-04-08 11
+80 val_80 2008-04-08 12
+80 val_80 2008-04-09 11
+80 val_80 2008-04-09 12
+82 val_82 2008-04-08 11
+82 val_82 2008-04-08 12
+82 val_82 2008-04-09 11
+82 val_82 2008-04-09 12
+83 val_83 2008-04-08 11
+83 val_83 2008-04-08 11
+83 val_83 2008-04-08 12
+83 val_83 2008-04-08 12
+83 val_83 2008-04-09 11
+83 val_83 2008-04-09 11
+83 val_83 2008-04-09 12
+83 val_83 2008-04-09 12
+84 val_84 2008-04-08 11
+84 val_84 2008-04-08 11
+84 val_84 2008-04-08 12
+84 val_84 2008-04-08 12
+84 val_84 2008-04-09 11
+84 val_84 2008-04-09 11
+84 val_84 2008-04-09 12
+84 val_84 2008-04-09 12
+85 val_85 2008-04-08 11
+85 val_85 2008-04-08 12
+85 val_85 2008-04-09 11
+85 val_85 2008-04-09 12
+86 val_86 2008-04-08 11
+86 val_86 2008-04-08 12
+86 val_86 2008-04-09 11
+86 val_86 2008-04-09 12
+87 val_87 2008-04-08 11
+87 val_87 2008-04-08 12
+87 val_87 2008-04-09 11
+87 val_87 2008-04-09 12
+9 val_9 2008-04-08 11
+9 val_9 2008-04-08 12
+9 val_9 2008-04-09 11
+9 val_9 2008-04-09 12
+90 val_90 2008-04-08 11
+90 val_90 2008-04-08 11
+90 val_90 2008-04-08 11
+90 val_90 2008-04-08 12
+90 val_90 2008-04-08 12
+90 val_90 2008-04-08 12
+90 val_90 2008-04-09 11
+90 val_90 2008-04-09 11
+90 val_90 2008-04-09 11
+90 val_90 2008-04-09 12
+90 val_90 2008-04-09 12
+90 val_90 2008-04-09 12
+92 val_92 2008-04-08 11
+92 val_92 2008-04-08 12
+92 val_92 2008-04-09 11
+92 val_92 2008-04-09 12
+95 val_95 2008-04-08 11
+95 val_95 2008-04-08 11
+95 val_95 2008-04-08 12
+95 val_95 2008-04-08 12
+95 val_95 2008-04-09 11
+95 val_95 2008-04-09 11
+95 val_95 2008-04-09 12
+95 val_95 2008-04-09 12
+96 val_96 2008-04-08 11
+96 val_96 2008-04-08 12
+96 val_96 2008-04-09 11
+96 val_96 2008-04-09 12
+97 val_97 2008-04-08 11
+97 val_97 2008-04-08 11
+97 val_97 2008-04-08 12
+97 val_97 2008-04-08 12
+97 val_97 2008-04-09 11
+97 val_97 2008-04-09 11
+97 val_97 2008-04-09 12
+97 val_97 2008-04-09 12
+98 val_98 2008-04-08 11
+98 val_98 2008-04-08 11
+98 val_98 2008-04-08 12
+98 val_98 2008-04-08 12
+98 val_98 2008-04-09 11
+98 val_98 2008-04-09 11
+98 val_98 2008-04-09 12
+98 val_98 2008-04-09 12
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/lvj_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/lvj_mapjoin.q.out b/ql/src/test/results/clientpositive/llap/lvj_mapjoin.q.out
new file mode 100644
index 0000000..03c2ebd
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/lvj_mapjoin.q.out
@@ -0,0 +1,296 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+drop table sour1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+drop table sour1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table sour2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table sour2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table expod1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table expod1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table expod2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table expod2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table sour1(id int, av1 string, av2 string, av3 string) row format delimited fields terminated by ','
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@sour1
+POSTHOOK: query: create table sour1(id int, av1 string, av2 string, av3 string) row format delimited fields terminated by ','
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@sour1
+PREHOOK: query: create table sour2(id int, bv1 string, bv2 string, bv3 string) row format delimited fields terminated by ','
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@sour2
+POSTHOOK: query: create table sour2(id int, bv1 string, bv2 string, bv3 string) row format delimited fields terminated by ','
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@sour2
+PREHOOK: query: load data local inpath '../../data/files/sour1.txt' into table sour1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@sour1
+POSTHOOK: query: load data local inpath '../../data/files/sour1.txt' into table sour1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@sour1
+PREHOOK: query: load data local inpath '../../data/files//sour2.txt' into table sour2
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@sour2
+POSTHOOK: query: load data local inpath '../../data/files//sour2.txt' into table sour2
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@sour2
+PREHOOK: query: create table expod1(aid int, av array<string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@expod1
+POSTHOOK: query: create table expod1(aid int, av array<string>)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@expod1
+PREHOOK: query: create table expod2(bid int, bv array<string>)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@expod2
+POSTHOOK: query: create table expod2(bid int, bv array<string>)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@expod2
+PREHOOK: query: insert overwrite table expod1 select id, array(av1,av2,av3) from sour1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sour1
+PREHOOK: Output: default@expod1
+POSTHOOK: query: insert overwrite table expod1 select id, array(av1,av2,av3) from sour1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sour1
+POSTHOOK: Output: default@expod1
+POSTHOOK: Lineage: expod1.aid SIMPLE [(sour1)sour1.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: expod1.av EXPRESSION [(sour1)sour1.FieldSchema(name:av1, type:string, comment:null), (sour1)sour1.FieldSchema(name:av2, type:string, comment:null), (sour1)sour1.FieldSchema(name:av3, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table expod2 select id, array(bv1,bv2,bv3) from sour2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sour2
+PREHOOK: Output: default@expod2
+POSTHOOK: query: insert overwrite table expod2 select id, array(bv1,bv2,bv3) from sour2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sour2
+POSTHOOK: Output: default@expod2
+POSTHOOK: Lineage: expod2.bid SIMPLE [(sour2)sour2.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: expod2.bv EXPRESSION [(sour2)sour2.FieldSchema(name:bv1, type:string, comment:null), (sour2)sour2.FieldSchema(name:bv2, type:string, comment:null), (sour2)sour2.FieldSchema(name:bv3, type:string, comment:null), ]
+PREHOOK: query: explain with sub1 as
+(select aid, avalue from expod1 lateral view explode(av) avs as avalue ),
+sub2 as
+(select bid, bvalue from expod2 lateral view explode(bv) bvs as bvalue)
+select sub1.aid, sub1.avalue, sub2.bvalue
+from sub1,sub2
+where sub1.aid=sub2.bid
+PREHOOK: type: QUERY
+POSTHOOK: query: explain with sub1 as
+(select aid, avalue from expod1 lateral view explode(av) avs as avalue ),
+sub2 as
+(select bid, bvalue from expod2 lateral view explode(bv) bvs as bvalue)
+select sub1.aid, sub1.avalue, sub2.bvalue
+from sub1,sub2
+where sub1.aid=sub2.bid
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: expod1
+ Statistics: Num rows: 3 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: aid is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Lateral View Forward
+ Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: aid (type: int)
+ outputColumnNames: aid
+ Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Lateral View Join Operator
+ outputColumnNames: _col0, _col5
+ Statistics: Num rows: 4 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col5 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 4 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 4 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Filter Operator
+ predicate: (_col0 = _col2) (type: boolean)
+ Statistics: Num rows: 2 Data size: 28 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 28 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 28 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Select Operator
+ expressions: av (type: array<string>)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ UDTF Operator
+ Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ function name: explode
+ Lateral View Join Operator
+ outputColumnNames: _col0, _col5
+ Statistics: Num rows: 4 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col5 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 4 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 4 Data size: 57 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Filter Operator
+ predicate: (_col0 = _col2) (type: boolean)
+ Statistics: Num rows: 2 Data size: 28 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 28 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 28 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: expod2
+ Statistics: Num rows: 3 Data size: 39 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: bid is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Lateral View Forward
+ Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: bid (type: int)
+ outputColumnNames: bid
+ Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ Lateral View Join Operator
+ outputColumnNames: _col0, _col5
+ Statistics: Num rows: 4 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col5 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 4 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 4 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Select Operator
+ expressions: bv (type: array<string>)
+ outputColumnNames: _col0
+ Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ UDTF Operator
+ Statistics: Num rows: 2 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+ function name: explode
+ Lateral View Join Operator
+ outputColumnNames: _col0, _col5
+ Statistics: Num rows: 4 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: int), _col5 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 4 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 4 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: with sub1 as
+(select aid, avalue from expod1 lateral view explode(av) avs as avalue ),
+sub2 as
+(select bid, bvalue from expod2 lateral view explode(bv) bvs as bvalue)
+select sub1.aid, sub1.avalue, sub2.bvalue
+from sub1,sub2
+where sub1.aid=sub2.bid
+PREHOOK: type: QUERY
+PREHOOK: Input: default@expod1
+PREHOOK: Input: default@expod2
+#### A masked pattern was here ####
+POSTHOOK: query: with sub1 as
+(select aid, avalue from expod1 lateral view explode(av) avs as avalue ),
+sub2 as
+(select bid, bvalue from expod2 lateral view explode(bv) bvs as bvalue)
+select sub1.aid, sub1.avalue, sub2.bvalue
+from sub1,sub2
+where sub1.aid=sub2.bid
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@expod1
+POSTHOOK: Input: default@expod2
+#### A masked pattern was here ####
+1 a1 b1
+1 a1 b11
+1 a1 b111
+1 a11 b1
+1 a11 b11
+1 a11 b111
+1 a111 b1
+1 a111 b11
+1 a111 b111
+2 a2 b2
+2 a2 b22
+2 a2 b222
+2 a22 b2
+2 a22 b22
+2 a22 b222
+2 a222 b2
+2 a222 b22
+2 a222 b222
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
new file mode 100644
index 0000000..98d9ceb
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
@@ -0,0 +1,393 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE over1k(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE over1k(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@over1k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@over1k
+PREHOOK: query: CREATE TABLE t1(dec decimal(4,2)) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: CREATE TABLE t1(dec decimal(4,2)) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: INSERT INTO TABLE t1 select dec from over1k
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@t1
+POSTHOOK: query: INSERT INTO TABLE t1 select dec from over1k
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+PREHOOK: query: CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t2
+POSTHOOK: query: CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t2
+PREHOOK: query: INSERT INTO TABLE t2 select dec from over1k
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@t2
+POSTHOOK: query: INSERT INTO TABLE t2 select dec from over1k
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.dec EXPRESSION [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+PREHOOK: query: explain
+select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: dec is not null (type: boolean)
+ Statistics: Num rows: 525 Data size: 58800 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 dec (type: decimal(6,2))
+ 1 dec (type: decimal(6,2))
+ outputColumnNames: _col0, _col4
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: decimal(4,2)), _col4 (type: decimal(4,0))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: decimal(4,2))
+ sort order: +
+ Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: decimal(4,0))
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: t2
+ Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: dec is not null (type: boolean)
+ Statistics: Num rows: 525 Data size: 58800 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: dec (type: decimal(6,2))
+ sort order: +
+ Map-reduce partition columns: dec (type: decimal(6,2))
+ Statistics: Num rows: 525 Data size: 58800 Basic stats: COMPLETE Column stats: NONE
+ value expressions: dec (type: decimal(4,0))
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: KEY.reducesinkkey0 (type: decimal(4,2)), VALUE._col0 (type: decimal(4,0))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+14 14
+14 14
+14 14
+14 14
+14 14
+14 14
+14 14
+14 14
+14 14
+17 17
+17 17
+17 17
+17 17
+17 17
+17 17
+17 17
+17 17
+17 17
+17 17
+45 45
+45 45
+45 45
+45 45
+45 45
+6 6
+6 6
+6 6
+6 6
+6 6
+6 6
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+70 70
+70 70
+70 70
+70 70
+70 70
+70 70
+70 70
+79 79
+79 79
+79 79
+79 79
+79 79
+79 79
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) order by t1.dec
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+14 14
+14 14
+14 14
+14 14
+14 14
+14 14
+14 14
+14 14
+14 14
+17 17
+17 17
+17 17
+17 17
+17 17
+17 17
+17 17
+17 17
+17 17
+17 17
+45 45
+45 45
+45 45
+45 45
+45 45
+6 6
+6 6
+6 6
+6 6
+6 6
+6 6
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+62 62
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+64 64
+70 70
+70 70
+70 70
+70 70
+70 70
+70 70
+70 70
+79 79
+79 79
+79 79
+79 79
+79 79
+79 79
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+89 89
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
+9 9
[09/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_analyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_analyze.q.out b/ql/src/test/results/clientpositive/llap/orc_analyze.q.out
new file mode 100644
index 0000000..6eb9a93
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_analyze.q.out
@@ -0,0 +1,1726 @@
+PREHOOK: query: CREATE TABLE orc_create_people_staging (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp,
+ state string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_people_staging
+POSTHOOK: query: CREATE TABLE orc_create_people_staging (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp,
+ state string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_people_staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/orc_create_people.txt' OVERWRITE INTO TABLE orc_create_people_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_create_people_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/orc_create_people.txt' OVERWRITE INTO TABLE orc_create_people_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_create_people_staging
+PREHOOK: query: -- non-partitioned table
+-- partial scan gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp,
+ state string)
+STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: -- non-partitioned table
+-- partial scan gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp,
+ state string)
+STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: INSERT OVERWRITE TABLE orc_create_people SELECT * FROM orc_create_people_staging ORDER BY id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people_staging
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_people SELECT * FROM orc_create_people_staging ORDER BY id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people_staging
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Lineage: orc_create_people.address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people.first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people.id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people.last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people.salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people.start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_create_people.state SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:state, type:string, comment:null), ]
+PREHOOK: query: analyze table orc_create_people compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: analyze table orc_create_people compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: desc formatted orc_create_people
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+state string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 100
+ rawDataSize 52600
+ totalSize 3174
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table orc_create_people compute statistics partialscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: analyze table orc_create_people compute statistics partialscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: desc formatted orc_create_people
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+state string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 100
+ rawDataSize 52600
+ totalSize 3174
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table orc_create_people compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: analyze table orc_create_people compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: desc formatted orc_create_people
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+state string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 100
+ rawDataSize 52600
+ totalSize 3174
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table orc_create_people
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: drop table orc_create_people
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: -- auto stats gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp,
+ state string)
+STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: -- auto stats gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp,
+ state string)
+STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: INSERT OVERWRITE TABLE orc_create_people SELECT * FROM orc_create_people_staging ORDER BY id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people_staging
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_people SELECT * FROM orc_create_people_staging ORDER BY id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people_staging
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Lineage: orc_create_people.address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people.first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people.id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people.last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people.salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people.start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_create_people.state SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:state, type:string, comment:null), ]
+PREHOOK: query: desc formatted orc_create_people
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+state string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 100
+ rawDataSize 52600
+ totalSize 3174
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table orc_create_people
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: drop table orc_create_people
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: -- partitioned table
+-- partial scan gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: -- partitioned table
+-- partial scan gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people_staging
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people_staging
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+PREHOOK: query: analyze table orc_create_people partition(state) compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+PREHOOK: Output: default@orc_create_people@state=Ca
+PREHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="Or")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Or")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Or]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 22050
+ totalSize 2088
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+PREHOOK: Output: default@orc_create_people@state=Ca
+PREHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="Or")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Or")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Or]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 22050
+ totalSize 2088
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+PREHOOK: Output: default@orc_create_people@state=Ca
+PREHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="Or")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Or")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Or]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 22050
+ totalSize 2088
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table orc_create_people
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: drop table orc_create_people
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: -- auto stats gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: -- auto stats gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people_staging
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people_staging
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="Or")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Or")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Or]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 22050
+ totalSize 2088
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table orc_create_people
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: drop table orc_create_people
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: -- partitioned and bucketed table
+-- partial scan gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+clustered by (first_name)
+sorted by (last_name)
+into 4 buckets
+STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: -- partitioned and bucketed table
+-- partial scan gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+clustered by (first_name)
+sorted by (last_name)
+into 4 buckets
+STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people_staging
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people_staging
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+PREHOOK: query: analyze table orc_create_people partition(state) compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+PREHOOK: Output: default@orc_create_people@state=Ca
+PREHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [first_name]
+Sort Columns: [Order(col:last_name, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="Or")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Or")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Or]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 22050
+ totalSize 2088
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [first_name]
+Sort Columns: [Order(col:last_name, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+PREHOOK: Output: default@orc_create_people@state=Ca
+PREHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [first_name]
+Sort Columns: [Order(col:last_name, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="Or")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Or")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Or]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 22050
+ totalSize 2088
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [first_name]
+Sort Columns: [Order(col:last_name, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+PREHOOK: Output: default@orc_create_people@state=Ca
+PREHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [first_name]
+Sort Columns: [Order(col:last_name, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="Or")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Or")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Or]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 22050
+ totalSize 2088
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [first_name]
+Sort Columns: [Order(col:last_name, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table orc_create_people
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: drop table orc_create_people
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: -- auto stats gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+clustered by (first_name)
+sorted by (last_name)
+into 4 buckets
+STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: -- auto stats gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+clustered by (first_name)
+sorted by (last_name)
+into 4 buckets
+STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people_staging
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people_staging
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [first_name]
+Sort Columns: [Order(col:last_name, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="Or")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Or")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Or]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 22050
+ totalSize 2088
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: 4
+Bucket Columns: [first_name]
+Sort Columns: [Order(col:last_name, order:1)]
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table orc_create_people
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: drop table orc_create_people
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: -- create table with partitions containing text and ORC files.
+-- ORC files implements StatsProvidingRecordReader but text files does not.
+-- So the partition containing text file should not have statistics.
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: -- create table with partitions containing text and ORC files.
+-- ORC files implements StatsProvidingRecordReader but text files does not.
+-- So the partition containing text file should not have statistics.
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people_staging
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people_staging
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Ca).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).address SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:address, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).first_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:first_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).id SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).last_name SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:last_name, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
+PREHOOK: query: -- set the table to text format
+ALTER TABLE orc_create_people SET SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+PREHOOK: type: ALTERTABLE_SERIALIZER
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: -- set the table to text format
+ALTER TABLE orc_create_people SET SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
+POSTHOOK: type: ALTERTABLE_SERIALIZER
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: ALTER TABLE orc_create_people SET FILEFORMAT TEXTFILE
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: ALTER TABLE orc_create_people SET FILEFORMAT TEXTFILE
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: -- load the text data into a new partition
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE orc_create_people PARTITION(state="OH")
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: -- load the text data into a new partition
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE orc_create_people PARTITION(state="OH")
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people@state=OH
+PREHOOK: query: -- set the table back to orc
+ALTER TABLE orc_create_people SET SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
+PREHOOK: type: ALTERTABLE_SERIALIZER
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: -- set the table back to orc
+ALTER TABLE orc_create_people SET SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
+POSTHOOK: type: ALTERTABLE_SERIALIZER
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: ALTER TABLE orc_create_people SET FILEFORMAT ORC
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: ALTER TABLE orc_create_people SET FILEFORMAT ORC
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+PREHOOK: query: analyze table orc_create_people partition(state) compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+PREHOOK: Output: default@orc_create_people@state=Ca
+PREHOOK: Output: default@orc_create_people@state=OH
+PREHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=OH
+POSTHOOK: Output: default@orc_create_people@state=Or
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="OH")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="OH")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [OH]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE false
+ numFiles 1
+ numRows -1
+ rawDataSize -1
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+PREHOOK: Output: default@orc_create_people@state=Ca
+PREHOOK: Output: default@orc_create_people@state=OH
+PREHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=OH
+POSTHOOK: Output: default@orc_create_people@state=Or
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="OH")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="OH")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [OH]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE false
+ numFiles 1
+ numRows -1
+ rawDataSize -1
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+PREHOOK: Output: default@orc_create_people@state=Ca
+PREHOOK: Output: default@orc_create_people@state=OH
+PREHOOK: Output: default@orc_create_people@state=Or
+POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people@state=Ca
+POSTHOOK: Output: default@orc_create_people@state=OH
+POSTHOOK: Output: default@orc_create_people@state=Or
+PREHOOK: query: desc formatted orc_create_people partition(state="Ca")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="Ca")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [Ca]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE true
+ numFiles 1
+ numRows 50
+ rawDataSize 21950
+ totalSize 2073
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: desc formatted orc_create_people partition(state="OH")
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_create_people
+POSTHOOK: query: desc formatted orc_create_people partition(state="OH")
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_create_people
+# col_name data_type comment
+
+id int
+first_name string
+last_name string
+address string
+salary decimal(10,0)
+start_date timestamp
+
+# Partition Information
+# col_name data_type comment
+
+state string
+
+# Detailed Partition Information
+Partition Value: [OH]
+Database: default
+Table: orc_create_people
+#### A masked pattern was here ####
+Partition Parameters:
+ COLUMN_STATS_ACCURATE false
+ numFiles 1
+ numRows -1
+ rawDataSize -1
+ totalSize 5812
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table orc_create_people
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_create_people
+PREHOOK: Output: default@orc_create_people
+POSTHOOK: query: drop table orc_create_people
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_create_people
+POSTHOOK: Output: default@orc_create_people
[11/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mergejoin.q.out b/ql/src/test/results/clientpositive/llap/mergejoin.q.out
new file mode 100644
index 0000000..e69f79d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/mergejoin.q.out
@@ -0,0 +1,3150 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+select * from src a join src1 b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
+select * from src a join src1 b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ filterExpr: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ filterExpr: key is not null (type: boolean)
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from src a join src1 b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src a join src1 b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+128 val_128 128
+128 val_128 128
+128 val_128 128
+146 val_146 146 val_146
+146 val_146 146 val_146
+150 val_150 150 val_150
+213 val_213 213 val_213
+213 val_213 213 val_213
+224 val_224 224
+224 val_224 224
+238 val_238 238 val_238
+238 val_238 238 val_238
+255 val_255 255 val_255
+255 val_255 255 val_255
+273 val_273 273 val_273
+273 val_273 273 val_273
+273 val_273 273 val_273
+278 val_278 278 val_278
+278 val_278 278 val_278
+311 val_311 311 val_311
+311 val_311 311 val_311
+311 val_311 311 val_311
+369 val_369 369
+369 val_369 369
+369 val_369 369
+401 val_401 401 val_401
+401 val_401 401 val_401
+401 val_401 401 val_401
+401 val_401 401 val_401
+401 val_401 401 val_401
+406 val_406 406 val_406
+406 val_406 406 val_406
+406 val_406 406 val_406
+406 val_406 406 val_406
+66 val_66 66 val_66
+98 val_98 98 val_98
+98 val_98 98 val_98
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab_part
+POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab_part
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin_part
+PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+PREHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin_part
+POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
+POSTHOOK: Output: default@tab_part@ds=2008-04-08
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab
+POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS ORCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tab
+PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcbucket_mapjoin
+PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+PREHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcbucket_mapjoin
+POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08
+POSTHOOK: Output: default@tab@ds=2008-04-08
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: explain
+select count(*)
+from tab a join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from tab a join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ filterExpr: key is not null (type: boolean)
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 11374 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 11374 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: b
+ filterExpr: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 23500 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 250 Data size: 23500 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ Statistics: Num rows: 275 Data size: 25850 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+103 val_103 2008-04-08 103 val_103 2008-04-08
+103 val_103 2008-04-08 103 val_103 2008-04-08
+103 val_103 2008-04-08 103 val_103 2008-04-08
+103 val_103 2008-04-08 103 val_103 2008-04-08
+105 val_105 2008-04-08 105 val_105 2008-04-08
+11 val_11 2008-04-08 11 val_11 2008-04-08
+114 val_114 2008-04-08 114 val_114 2008-04-08
+116 val_116 2008-04-08 116 val_116 2008-04-08
+118 val_118 2008-04-08 118 val_118 2008-04-08
+118 val_118 2008-04-08 118 val_118 2008-04-08
+118 val_118 2008-04-08 118 val_118 2008-04-08
+118 val_118 2008-04-08 118 val_118 2008-04-08
+125 val_125 2008-04-08 125 val_125 2008-04-08
+125 val_125 2008-04-08 125 val_125 2008-04-08
+125 val_125 2008-04-08 125 val_125 2008-04-08
+125 val_125 2008-04-08 125 val_125 2008-04-08
+129 val_129 2008-04-08 129 val_129 2008-04-08
+129 val_129 2008-04-08 129 val_129 2008-04-08
+129 val_129 2008-04-08 129 val_129 2008-04-08
+129 val_129 2008-04-08 129 val_129 2008-04-08
+134 val_134 2008-04-08 134 val_134 2008-04-08
+134 val_134 2008-04-08 134 val_134 2008-04-08
+134 val_134 2008-04-08 134 val_134 2008-04-08
+134 val_134 2008-04-08 134 val_134 2008-04-08
+136 val_136 2008-04-08 136 val_136 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+143 val_143 2008-04-08 143 val_143 2008-04-08
+145 val_145 2008-04-08 145 val_145 2008-04-08
+149 val_149 2008-04-08 149 val_149 2008-04-08
+149 val_149 2008-04-08 149 val_149 2008-04-08
+149 val_149 2008-04-08 149 val_149 2008-04-08
+149 val_149 2008-04-08 149 val_149 2008-04-08
+15 val_15 2008-04-08 15 val_15 2008-04-08
+15 val_15 2008-04-08 15 val_15 2008-04-08
+15 val_15 2008-04-08 15 val_15 2008-04-08
+15 val_15 2008-04-08 15 val_15 2008-04-08
+150 val_150 2008-04-08 150 val_150 2008-04-08
+152 val_152 2008-04-08 152 val_152 2008-04-08
+152 val_152 2008-04-08 152 val_152 2008-04-08
+152 val_152 2008-04-08 152 val_152 2008-04-08
+152 val_152 2008-04-08 152 val_152 2008-04-08
+156 val_156 2008-04-08 156 val_156 2008-04-08
+158 val_158 2008-04-08 158 val_158 2008-04-08
+163 val_163 2008-04-08 163 val_163 2008-04-08
+165 val_165 2008-04-08 165 val_165 2008-04-08
+165 val_165 2008-04-08 165 val_165 2008-04-08
+165 val_165 2008-04-08 165 val_165 2008-04-08
+165 val_165 2008-04-08 165 val_165 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+17 val_17 2008-04-08 17 val_17 2008-04-08
+170 val_170 2008-04-08 170 val_170 2008-04-08
+172 val_172 2008-04-08 172 val_172 2008-04-08
+172 val_172 2008-04-08 172 val_172 2008-04-08
+172 val_172 2008-04-08 172 val_172 2008-04-08
+172 val_172 2008-04-08 172 val_172 2008-04-08
+174 val_174 2008-04-08 174 val_174 2008-04-08
+174 val_174 2008-04-08 174 val_174 2008-04-08
+174 val_174 2008-04-08 174 val_174 2008-04-08
+174 val_174 2008-04-08 174 val_174 2008-04-08
+176 val_176 2008-04-08 176 val_176 2008-04-08
+176 val_176 2008-04-08 176 val_176 2008-04-08
+176 val_176 2008-04-08 176 val_176 2008-04-08
+176 val_176 2008-04-08 176 val_176 2008-04-08
+178 val_178 2008-04-08 178 val_178 2008-04-08
+181 val_181 2008-04-08 181 val_181 2008-04-08
+183 val_183 2008-04-08 183 val_183 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+189 val_189 2008-04-08 189 val_189 2008-04-08
+19 val_19 2008-04-08 19 val_19 2008-04-08
+190 val_190 2008-04-08 190 val_190 2008-04-08
+192 val_192 2008-04-08 192 val_192 2008-04-08
+194 val_194 2008-04-08 194 val_194 2008-04-08
+196 val_196 2008-04-08 196 val_196 2008-04-08
+2 val_2 2008-04-08 2 val_2 2008-04-08
+20 val_20 2008-04-08 20 val_20 2008-04-08
+200 val_200 2008-04-08 200 val_200 2008-04-08
+200 val_200 2008-04-08 200 val_200 2008-04-08
+200 val_200 2008-04-08 200 val_200 2008-04-08
+200 val_200 2008-04-08 200 val_200 2008-04-08
+202 val_202 2008-04-08 202 val_202 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+213 val_213 2008-04-08 213 val_213 2008-04-08
+213 val_213 2008-04-08 213 val_213 2008-04-08
+213 val_213 2008-04-08 213 val_213 2008-04-08
+213 val_213 2008-04-08 213 val_213 2008-04-08
+217 val_217 2008-04-08 217 val_217 2008-04-08
+217 val_217 2008-04-08 217 val_217 2008-04-08
+217 val_217 2008-04-08 217 val_217 2008-04-08
+217 val_217 2008-04-08 217 val_217 2008-04-08
+219 val_219 2008-04-08 219 val_219 2008-04-08
+219 val_219 2008-04-08 219 val_219 2008-04-08
+219 val_219 2008-04-08 219 val_219 2008-04-08
+219 val_219 2008-04-08 219 val_219 2008-04-08
+222 val_222 2008-04-08 222 val_222 2008-04-08
+224 val_224 2008-04-08 224 val_224 2008-04-08
+224 val_224 2008-04-08 224 val_224 2008-04-08
+224 val_224 2008-04-08 224 val_224 2008-04-08
+224 val_224 2008-04-08 224 val_224 2008-04-08
+226 val_226 2008-04-08 226 val_226 2008-04-08
+228 val_228 2008-04-08 228 val_228 2008-04-08
+233 val_233 2008-04-08 233 val_233 2008-04-08
+233 val_233 2008-04-08 233 val_233 2008-04-08
+233 val_233 2008-04-08 233 val_233 2008-04-08
+233 val_233 2008-04-08 233 val_233 2008-04-08
+235 val_235 2008-04-08 235 val_235 2008-04-08
+237 val_237 2008-04-08 237 val_237 2008-04-08
+237 val_237 2008-04-08 237 val_237 2008-04-08
+237 val_237 2008-04-08 237 val_237 2008-04-08
+237 val_237 2008-04-08 237 val_237 2008-04-08
+239 val_239 2008-04-08 239 val_239 2008-04-08
+239 val_239 2008-04-08 239 val_239 2008-04-08
+239 val_239 2008-04-08 239 val_239 2008-04-08
+239 val_239 2008-04-08 239 val_239 2008-04-08
+24 val_24 2008-04-08 24 val_24 2008-04-08
+24 val_24 2008-04-08 24 val_24 2008-04-08
+24 val_24 2008-04-08 24 val_24 2008-04-08
+24 val_24 2008-04-08 24 val_24 2008-04-08
+242 val_242 2008-04-08 242 val_242 2008-04-08
+242 val_242 2008-04-08 242 val_242 2008-04-08
+242 val_242 2008-04-08 242 val_242 2008-04-08
+242 val_242 2008-04-08 242 val_242 2008-04-08
+244 val_244 2008-04-08 244 val_244 2008-04-08
+248 val_248 2008-04-08 248 val_248 2008-04-08
+255 val_255 2008-04-08 255 val_255 2008-04-08
+255 val_255 2008-04-08 255 val_255 2008-04-08
+255 val_255 2008-04-08 255 val_255 2008-04-08
+255 val_255 2008-04-08 255 val_255 2008-04-08
+257 val_257 2008-04-08 257 val_257 2008-04-08
+26 val_26 2008-04-08 26 val_26 2008-04-08
+26 val_26 2008-04-08 26 val_26 2008-04-08
+26 val_26 2008-04-08 26 val_26 2008-04-08
+26 val_26 2008-04-08 26 val_26 2008-04-08
+260 val_260 2008-04-08 260 val_260 2008-04-08
+262 val_262 2008-04-08 262 val_262 2008-04-08
+266 val_266 2008-04-08 266 val_266 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+275 val_275 2008-04-08 275 val_275 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+28 val_28 2008-04-08 28 val_28 2008-04-08
+280 val_280 2008-04-08 280 val_280 2008-04-08
+280 val_280 2008-04-08 280 val_280 2008-04-08
+280 val_280 2008-04-08 280 val_280 2008-04-08
+280 val_280 2008-04-08 280 val_280 2008-04-08
+282 val_282 2008-04-08 282 val_282 2008-04-08
+282 val_282 2008-04-08 282 val_282 2008-04-08
+282 val_282 2008-04-08 282 val_282 2008-04-08
+282 val_282 2008-04-08 282 val_282 2008-04-08
+284 val_284 2008-04-08 284 val_284 2008-04-08
+286 val_286 2008-04-08 286 val_286 2008-04-08
+288 val_288 2008-04-08 288 val_288 2008-04-08
+288 val_288 2008-04-08 288 val_288 2008-04-08
+288 val_288 2008-04-08 288 val_288 2008-04-08
+288 val_288 2008-04-08 288 val_288 2008-04-08
+291 val_291 2008-04-08 291 val_291 2008-04-08
+305 val_305 2008-04-08 305 val_305 2008-04-08
+307 val_307 2008-04-08 307 val_307 2008-04-08
+307 val_307 2008-04-08 307 val_307 2008-04-08
+307 val_307 2008-04-08 307 val_307 2008-04-08
+307 val_307 2008-04-08 307 val_307 2008-04-08
+309 val_309 2008-04-08 309 val_309 2008-04-08
+309 val_309 2008-04-08 309 val_309 2008-04-08
+309 val_309 2008-04-08 309 val_309 2008-04-08
+309 val_309 2008-04-08 309 val_309 2008-04-08
+310 val_310 2008-04-08 310 val_310 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+321 val_321 2008-04-08 321 val_321 2008-04-08
+321 val_321 2008-04-08 321 val_321 2008-04-08
+321 val_321 2008-04-08 321 val_321 2008-04-08
+321 val_321 2008-04-08 321 val_321 2008-04-08
+323 val_323 2008-04-08 323 val_323 2008-04-08
+325 val_325 2008-04-08 325 val_325 2008-04-08
+325 val_325 2008-04-08 325 val_325 2008-04-08
+325 val_325 2008-04-08 325 val_325 2008-04-08
+325 val_325 2008-04-08 325 val_325 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+33 val_33 2008-04-08 33 val_33 2008-04-08
+332 val_332 2008-04-08 332 val_332 2008-04-08
+336 val_336 2008-04-08 336 val_336 2008-04-08
+338 val_338 2008-04-08 338 val_338 2008-04-08
+341 val_341 2008-04-08 341 val_341 2008-04-08
+345 val_345 2008-04-08 345 val_345 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+356 val_356 2008-04-08 356 val_356 2008-04-08
+365 val_365 2008-04-08 365 val_365 2008-04-08
+367 val_367 2008-04-08 367 val_367 2008-04-08
+367 val_367 2008-04-08 367 val_367 2008-04-08
+367 val_367 2008-04-08 367 val_367 2008-04-08
+367 val_367 2008-04-08 367 val_367 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+37 val_37 2008-04-08 37 val_37 2008-04-08
+37 val_37 2008-04-08 37 val_37 2008-04-08
+37 val_37 2008-04-08 37 val_37 2008-04-08
+37 val_37 2008-04-08 37 val_37 2008-04-08
+374 val_374 2008-04-08 374 val_374 2008-04-08
+378 val_378 2008-04-08 378 val_378 2008-04-08
+389 val_389 2008-04-08 389 val_389 2008-04-08
+392 val_392 2008-04-08 392 val_392 2008-04-08
+394 val_394 2008-04-08 394 val_394 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+4 val_4 2008-04-08 4 val_4 2008-04-08
+400 val_400 2008-04-08 400 val_400 2008-04-08
+402 val_402 2008-04-08 402 val_402 2008-04-08
+404 val_404 2008-04-08 404 val_404 2008-04-08
+404 val_404 2008-04-08 404 val_404 2008-04-08
+404 val_404 2008-04-08 404 val_404 2008-04-08
+404 val_404 2008-04-08 404 val_404 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+411 val_411 2008-04-08 411 val_411 2008-04-08
+413 val_413 2008-04-08 413 val_413 2008-04-08
+413 val_413 2008-04-08 413 val_413 2008-04-08
+413 val_413 2008-04-08 413 val_413 2008-04-08
+413 val_413 2008-04-08 413 val_413 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+419 val_419 2008-04-08 419 val_419 2008-04-08
+42 val_42 2008-04-08 42 val_42 2008-04-08
+42 val_42 2008-04-08 42 val_42 2008-04-08
+42 val_42 2008-04-08 42 val_42 2008-04-08
+42 val_42 2008-04-08 42 val_42 2008-04-08
+424 val_424 2008-04-08 424 val_424 2008-04-08
+424 val_424 2008-04-08 424 val_424 2008-04-08
+424 val_424 2008-04-08 424 val_424 2008-04-08
+424 val_424 2008-04-08 424 val_424 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+435 val_435 2008-04-08 435 val_435 2008-04-08
+437 val_437 2008-04-08 437 val_437 2008-04-08
+439 val_439 2008-04-08 439 val_439 2008-04-08
+439 val_439 2008-04-08 439 val_439 2008-04-08
+439 val_439 2008-04-08 439 val_439 2008-04-08
+439 val_439 2008-04-08 439 val_439 2008-04-08
+44 val_44 2008-04-08 44 val_44 2008-04-08
+444 val_444 2008-04-08 444 val_444 2008-04-08
+446 val_446 2008-04-08 446 val_446 2008-04-08
+448 val_448 2008-04-08 448 val_448 2008-04-08
+453 val_453 2008-04-08 453 val_453 2008-04-08
+455 val_455 2008-04-08 455 val_455 2008-04-08
+457 val_457 2008-04-08 457 val_457 2008-04-08
+459 val_459 2008-04-08 459 val_459 2008-04-08
+459 val_459 2008-04-08 459 val_459 2008-04-08
+459 val_459 2008-04-08 459 val_459 2008-04-08
+459 val_459 2008-04-08 459 val_459 2008-04-08
+460 val_460 2008-04-08 460 val_460 2008-04-08
+462 val_462 2008-04-08 462 val_462 2008-04-08
+462 val_462 2008-04-08 462 val_462 2008-04-08
+462 val_462 2008-04-08 462 val_462 2008-04-08
+462 val_462 2008-04-08 462 val_462 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+475 val_475 2008-04-08 475 val_475 2008-04-08
+477 val_477 2008-04-08 477 val_477 2008-04-08
+479 val_479 2008-04-08 479 val_479 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+482 val_482 2008-04-08 482 val_482 2008-04-08
+484 val_484 2008-04-08 484 val_484 2008-04-08
+491 val_491 2008-04-08 491 val_491 2008-04-08
+493 val_493 2008-04-08 493 val_493 2008-04-08
+495 val_495 2008-04-08 495 val_495 2008-04-08
+497 val_497 2008-04-08 497 val_497 2008-04-08
+51 val_51 2008-04-08 51 val_51 2008-04-08
+51 val_51 2008-04-08 51 val_51 2008-04-08
+51 val_51 2008-04-08 51 val_51 2008-04-08
+51 val_51 2008-04-08 51 val_51 2008-04-08
+53 val_53 2008-04-08 53 val_53 2008-04-08
+57 val_57 2008-04-08 57 val_57 2008-04-08
+64 val_64 2008-04-08 64 val_64 2008-04-08
+66 val_66 2008-04-08 66 val_66 2008-04-08
+77 val_77 2008-04-08 77 val_77 2008-04-08
+8 val_8 2008-04-08 8 val_8 2008-04-08
+80 val_80 2008-04-08 80 val_80 2008-04-08
+82 val_82 2008-04-08 82 val_82 2008-04-08
+84 val_84 2008-04-08 84 val_84 2008-04-08
+84 val_84 2008-04-08 84 val_84 2008-04-08
+84 val_84 2008-04-08 84 val_84 2008-04-08
+84 val_84 2008-04-08 84 val_84 2008-04-08
+86 val_86 2008-04-08 86 val_86 2008-04-08
+95 val_95 2008-04-08 95 val_95 2008-04-08
+95 val_95 2008-04-08 95 val_95 2008-04-08
+95 val_95 2008-04-08 95 val_95 2008-04-08
+95 val_95 2008-04-08 95 val_95 2008-04-08
+97 val_97 2008-04-08 97 val_97 2008-04-08
+97 val_97 2008-04-08 97 val_97 2008-04-08
+97 val_97 2008-04-08 97 val_97 2008-04-08
+97 val_97 2008-04-08 97 val_97 2008-04-08
+PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+0 val_0 2008-04-08 0 val_0 2008-04-08
+103 val_103 2008-04-08 103 val_103 2008-04-08
+103 val_103 2008-04-08 103 val_103 2008-04-08
+103 val_103 2008-04-08 103 val_103 2008-04-08
+103 val_103 2008-04-08 103 val_103 2008-04-08
+105 val_105 2008-04-08 105 val_105 2008-04-08
+11 val_11 2008-04-08 11 val_11 2008-04-08
+114 val_114 2008-04-08 114 val_114 2008-04-08
+116 val_116 2008-04-08 116 val_116 2008-04-08
+118 val_118 2008-04-08 118 val_118 2008-04-08
+118 val_118 2008-04-08 118 val_118 2008-04-08
+118 val_118 2008-04-08 118 val_118 2008-04-08
+118 val_118 2008-04-08 118 val_118 2008-04-08
+125 val_125 2008-04-08 125 val_125 2008-04-08
+125 val_125 2008-04-08 125 val_125 2008-04-08
+125 val_125 2008-04-08 125 val_125 2008-04-08
+125 val_125 2008-04-08 125 val_125 2008-04-08
+129 val_129 2008-04-08 129 val_129 2008-04-08
+129 val_129 2008-04-08 129 val_129 2008-04-08
+129 val_129 2008-04-08 129 val_129 2008-04-08
+129 val_129 2008-04-08 129 val_129 2008-04-08
+134 val_134 2008-04-08 134 val_134 2008-04-08
+134 val_134 2008-04-08 134 val_134 2008-04-08
+134 val_134 2008-04-08 134 val_134 2008-04-08
+134 val_134 2008-04-08 134 val_134 2008-04-08
+136 val_136 2008-04-08 136 val_136 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+138 val_138 2008-04-08 138 val_138 2008-04-08
+143 val_143 2008-04-08 143 val_143 2008-04-08
+145 val_145 2008-04-08 145 val_145 2008-04-08
+149 val_149 2008-04-08 149 val_149 2008-04-08
+149 val_149 2008-04-08 149 val_149 2008-04-08
+149 val_149 2008-04-08 149 val_149 2008-04-08
+149 val_149 2008-04-08 149 val_149 2008-04-08
+15 val_15 2008-04-08 15 val_15 2008-04-08
+15 val_15 2008-04-08 15 val_15 2008-04-08
+15 val_15 2008-04-08 15 val_15 2008-04-08
+15 val_15 2008-04-08 15 val_15 2008-04-08
+150 val_150 2008-04-08 150 val_150 2008-04-08
+152 val_152 2008-04-08 152 val_152 2008-04-08
+152 val_152 2008-04-08 152 val_152 2008-04-08
+152 val_152 2008-04-08 152 val_152 2008-04-08
+152 val_152 2008-04-08 152 val_152 2008-04-08
+156 val_156 2008-04-08 156 val_156 2008-04-08
+158 val_158 2008-04-08 158 val_158 2008-04-08
+163 val_163 2008-04-08 163 val_163 2008-04-08
+165 val_165 2008-04-08 165 val_165 2008-04-08
+165 val_165 2008-04-08 165 val_165 2008-04-08
+165 val_165 2008-04-08 165 val_165 2008-04-08
+165 val_165 2008-04-08 165 val_165 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+167 val_167 2008-04-08 167 val_167 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+169 val_169 2008-04-08 169 val_169 2008-04-08
+17 val_17 2008-04-08 17 val_17 2008-04-08
+170 val_170 2008-04-08 170 val_170 2008-04-08
+172 val_172 2008-04-08 172 val_172 2008-04-08
+172 val_172 2008-04-08 172 val_172 2008-04-08
+172 val_172 2008-04-08 172 val_172 2008-04-08
+172 val_172 2008-04-08 172 val_172 2008-04-08
+174 val_174 2008-04-08 174 val_174 2008-04-08
+174 val_174 2008-04-08 174 val_174 2008-04-08
+174 val_174 2008-04-08 174 val_174 2008-04-08
+174 val_174 2008-04-08 174 val_174 2008-04-08
+176 val_176 2008-04-08 176 val_176 2008-04-08
+176 val_176 2008-04-08 176 val_176 2008-04-08
+176 val_176 2008-04-08 176 val_176 2008-04-08
+176 val_176 2008-04-08 176 val_176 2008-04-08
+178 val_178 2008-04-08 178 val_178 2008-04-08
+181 val_181 2008-04-08 181 val_181 2008-04-08
+183 val_183 2008-04-08 183 val_183 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+187 val_187 2008-04-08 187 val_187 2008-04-08
+189 val_189 2008-04-08 189 val_189 2008-04-08
+19 val_19 2008-04-08 19 val_19 2008-04-08
+190 val_190 2008-04-08 190 val_190 2008-04-08
+192 val_192 2008-04-08 192 val_192 2008-04-08
+194 val_194 2008-04-08 194 val_194 2008-04-08
+196 val_196 2008-04-08 196 val_196 2008-04-08
+2 val_2 2008-04-08 2 val_2 2008-04-08
+20 val_20 2008-04-08 20 val_20 2008-04-08
+200 val_200 2008-04-08 200 val_200 2008-04-08
+200 val_200 2008-04-08 200 val_200 2008-04-08
+200 val_200 2008-04-08 200 val_200 2008-04-08
+200 val_200 2008-04-08 200 val_200 2008-04-08
+202 val_202 2008-04-08 202 val_202 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+208 val_208 2008-04-08 208 val_208 2008-04-08
+213 val_213 2008-04-08 213 val_213 2008-04-08
+213 val_213 2008-04-08 213 val_213 2008-04-08
+213 val_213 2008-04-08 213 val_213 2008-04-08
+213 val_213 2008-04-08 213 val_213 2008-04-08
+217 val_217 2008-04-08 217 val_217 2008-04-08
+217 val_217 2008-04-08 217 val_217 2008-04-08
+217 val_217 2008-04-08 217 val_217 2008-04-08
+217 val_217 2008-04-08 217 val_217 2008-04-08
+219 val_219 2008-04-08 219 val_219 2008-04-08
+219 val_219 2008-04-08 219 val_219 2008-04-08
+219 val_219 2008-04-08 219 val_219 2008-04-08
+219 val_219 2008-04-08 219 val_219 2008-04-08
+222 val_222 2008-04-08 222 val_222 2008-04-08
+224 val_224 2008-04-08 224 val_224 2008-04-08
+224 val_224 2008-04-08 224 val_224 2008-04-08
+224 val_224 2008-04-08 224 val_224 2008-04-08
+224 val_224 2008-04-08 224 val_224 2008-04-08
+226 val_226 2008-04-08 226 val_226 2008-04-08
+228 val_228 2008-04-08 228 val_228 2008-04-08
+233 val_233 2008-04-08 233 val_233 2008-04-08
+233 val_233 2008-04-08 233 val_233 2008-04-08
+233 val_233 2008-04-08 233 val_233 2008-04-08
+233 val_233 2008-04-08 233 val_233 2008-04-08
+235 val_235 2008-04-08 235 val_235 2008-04-08
+237 val_237 2008-04-08 237 val_237 2008-04-08
+237 val_237 2008-04-08 237 val_237 2008-04-08
+237 val_237 2008-04-08 237 val_237 2008-04-08
+237 val_237 2008-04-08 237 val_237 2008-04-08
+239 val_239 2008-04-08 239 val_239 2008-04-08
+239 val_239 2008-04-08 239 val_239 2008-04-08
+239 val_239 2008-04-08 239 val_239 2008-04-08
+239 val_239 2008-04-08 239 val_239 2008-04-08
+24 val_24 2008-04-08 24 val_24 2008-04-08
+24 val_24 2008-04-08 24 val_24 2008-04-08
+24 val_24 2008-04-08 24 val_24 2008-04-08
+24 val_24 2008-04-08 24 val_24 2008-04-08
+242 val_242 2008-04-08 242 val_242 2008-04-08
+242 val_242 2008-04-08 242 val_242 2008-04-08
+242 val_242 2008-04-08 242 val_242 2008-04-08
+242 val_242 2008-04-08 242 val_242 2008-04-08
+244 val_244 2008-04-08 244 val_244 2008-04-08
+248 val_248 2008-04-08 248 val_248 2008-04-08
+255 val_255 2008-04-08 255 val_255 2008-04-08
+255 val_255 2008-04-08 255 val_255 2008-04-08
+255 val_255 2008-04-08 255 val_255 2008-04-08
+255 val_255 2008-04-08 255 val_255 2008-04-08
+257 val_257 2008-04-08 257 val_257 2008-04-08
+26 val_26 2008-04-08 26 val_26 2008-04-08
+26 val_26 2008-04-08 26 val_26 2008-04-08
+26 val_26 2008-04-08 26 val_26 2008-04-08
+26 val_26 2008-04-08 26 val_26 2008-04-08
+260 val_260 2008-04-08 260 val_260 2008-04-08
+262 val_262 2008-04-08 262 val_262 2008-04-08
+266 val_266 2008-04-08 266 val_266 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+273 val_273 2008-04-08 273 val_273 2008-04-08
+275 val_275 2008-04-08 275 val_275 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+277 val_277 2008-04-08 277 val_277 2008-04-08
+28 val_28 2008-04-08 28 val_28 2008-04-08
+280 val_280 2008-04-08 280 val_280 2008-04-08
+280 val_280 2008-04-08 280 val_280 2008-04-08
+280 val_280 2008-04-08 280 val_280 2008-04-08
+280 val_280 2008-04-08 280 val_280 2008-04-08
+282 val_282 2008-04-08 282 val_282 2008-04-08
+282 val_282 2008-04-08 282 val_282 2008-04-08
+282 val_282 2008-04-08 282 val_282 2008-04-08
+282 val_282 2008-04-08 282 val_282 2008-04-08
+284 val_284 2008-04-08 284 val_284 2008-04-08
+286 val_286 2008-04-08 286 val_286 2008-04-08
+288 val_288 2008-04-08 288 val_288 2008-04-08
+288 val_288 2008-04-08 288 val_288 2008-04-08
+288 val_288 2008-04-08 288 val_288 2008-04-08
+288 val_288 2008-04-08 288 val_288 2008-04-08
+291 val_291 2008-04-08 291 val_291 2008-04-08
+305 val_305 2008-04-08 305 val_305 2008-04-08
+307 val_307 2008-04-08 307 val_307 2008-04-08
+307 val_307 2008-04-08 307 val_307 2008-04-08
+307 val_307 2008-04-08 307 val_307 2008-04-08
+307 val_307 2008-04-08 307 val_307 2008-04-08
+309 val_309 2008-04-08 309 val_309 2008-04-08
+309 val_309 2008-04-08 309 val_309 2008-04-08
+309 val_309 2008-04-08 309 val_309 2008-04-08
+309 val_309 2008-04-08 309 val_309 2008-04-08
+310 val_310 2008-04-08 310 val_310 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+316 val_316 2008-04-08 316 val_316 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+318 val_318 2008-04-08 318 val_318 2008-04-08
+321 val_321 2008-04-08 321 val_321 2008-04-08
+321 val_321 2008-04-08 321 val_321 2008-04-08
+321 val_321 2008-04-08 321 val_321 2008-04-08
+321 val_321 2008-04-08 321 val_321 2008-04-08
+323 val_323 2008-04-08 323 val_323 2008-04-08
+325 val_325 2008-04-08 325 val_325 2008-04-08
+325 val_325 2008-04-08 325 val_325 2008-04-08
+325 val_325 2008-04-08 325 val_325 2008-04-08
+325 val_325 2008-04-08 325 val_325 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+327 val_327 2008-04-08 327 val_327 2008-04-08
+33 val_33 2008-04-08 33 val_33 2008-04-08
+332 val_332 2008-04-08 332 val_332 2008-04-08
+336 val_336 2008-04-08 336 val_336 2008-04-08
+338 val_338 2008-04-08 338 val_338 2008-04-08
+341 val_341 2008-04-08 341 val_341 2008-04-08
+345 val_345 2008-04-08 345 val_345 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+35 val_35 2008-04-08 35 val_35 2008-04-08
+356 val_356 2008-04-08 356 val_356 2008-04-08
+365 val_365 2008-04-08 365 val_365 2008-04-08
+367 val_367 2008-04-08 367 val_367 2008-04-08
+367 val_367 2008-04-08 367 val_367 2008-04-08
+367 val_367 2008-04-08 367 val_367 2008-04-08
+367 val_367 2008-04-08 367 val_367 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+369 val_369 2008-04-08 369 val_369 2008-04-08
+37 val_37 2008-04-08 37 val_37 2008-04-08
+37 val_37 2008-04-08 37 val_37 2008-04-08
+37 val_37 2008-04-08 37 val_37 2008-04-08
+37 val_37 2008-04-08 37 val_37 2008-04-08
+374 val_374 2008-04-08 374 val_374 2008-04-08
+378 val_378 2008-04-08 378 val_378 2008-04-08
+389 val_389 2008-04-08 389 val_389 2008-04-08
+392 val_392 2008-04-08 392 val_392 2008-04-08
+394 val_394 2008-04-08 394 val_394 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+396 val_396 2008-04-08 396 val_396 2008-04-08
+4 val_4 2008-04-08 4 val_4 2008-04-08
+400 val_400 2008-04-08 400 val_400 2008-04-08
+402 val_402 2008-04-08 402 val_402 2008-04-08
+404 val_404 2008-04-08 404 val_404 2008-04-08
+404 val_404 2008-04-08 404 val_404 2008-04-08
+404 val_404 2008-04-08 404 val_404 2008-04-08
+404 val_404 2008-04-08 404 val_404 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+406 val_406 2008-04-08 406 val_406 2008-04-08
+411 val_411 2008-04-08 411 val_411 2008-04-08
+413 val_413 2008-04-08 413 val_413 2008-04-08
+413 val_413 2008-04-08 413 val_413 2008-04-08
+413 val_413 2008-04-08 413 val_413 2008-04-08
+413 val_413 2008-04-08 413 val_413 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+417 val_417 2008-04-08 417 val_417 2008-04-08
+419 val_419 2008-04-08 419 val_419 2008-04-08
+42 val_42 2008-04-08 42 val_42 2008-04-08
+42 val_42 2008-04-08 42 val_42 2008-04-08
+42 val_42 2008-04-08 42 val_42 2008-04-08
+42 val_42 2008-04-08 42 val_42 2008-04-08
+424 val_424 2008-04-08 424 val_424 2008-04-08
+424 val_424 2008-04-08 424 val_424 2008-04-08
+424 val_424 2008-04-08 424 val_424 2008-04-08
+424 val_424 2008-04-08 424 val_424 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+431 val_431 2008-04-08 431 val_431 2008-04-08
+435 val_435 2008-04-08 435 val_435 2008-04-08
+437 val_437 2008-04-08 437 val_437 2008-04-08
+439 val_439 2008-04-08 439 val_439 2008-04-08
+439 val_439 2008-04-08 439 val_439 2008-04-08
+439 val_439 2008-04-08 439 val_439 2008-04-08
+439 val_439 2008-04-08 439 val_439 2008-04-08
+44 val_44 2008-04-08 44 val_44 2008-04-08
+444 val_444 2008-04-08 444 val_444 2008-04-08
+446 val_446 2008-04-08 446 val_446 2008-04-08
+448 val_448 2008-04-08 448 val_448 2008-04-08
+453 val_453 2008-04-08 453 val_453 2008-04-08
+455 val_455 2008-04-08 455 val_455 2008-04-08
+457 val_457 2008-04-08 457 val_457 2008-04-08
+459 val_459 2008-04-08 459 val_459 2008-04-08
+459 val_459 2008-04-08 459 val_459 2008-04-08
+459 val_459 2008-04-08 459 val_459 2008-04-08
+459 val_459 2008-04-08 459 val_459 2008-04-08
+460 val_460 2008-04-08 460 val_460 2008-04-08
+462 val_462 2008-04-08 462 val_462 2008-04-08
+462 val_462 2008-04-08 462 val_462 2008-04-08
+462 val_462 2008-04-08 462 val_462 2008-04-08
+462 val_462 2008-04-08 462 val_462 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+466 val_466 2008-04-08 466 val_466 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+468 val_468 2008-04-08 468 val_468 2008-04-08
+475 val_475 2008-04-08 475 val_475 2008-04-08
+477 val_477 2008-04-08 477 val_477 2008-04-08
+479 val_479 2008-04-08 479 val_479 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+480 val_480 2008-04-08 480 val_480 2008-04-08
+482 val_482 2008-04-08 482 val_482 2008-04-08
+484 val_484 2008-04-08 484 val_484 2008-04-08
+491 val_491 2008-04-08 491 val_491 2008-04-08
+493 val_493 2008-04-08 493 val_493 2008-04-08
+495 val_495 2008-04-08 495 val_495 2008-04-08
+497 val_497 2008-04-08 497 val_497 2008-04-08
+51 val_51 2008-04-08 51 val_51 2008-04-08
+51 val_51 2008-04-08 51 val_51 2008-04-08
+51 val_51 2008-04-08 51 val_51 2008-04-08
+51 val_51 2008-04-08 51 val_51 2008-04-08
+53 val_53 2008-04-08 53 val_53 2008-04-08
+57 val_57 2008-04-08 57 val_57 2008-04-08
+64 val_64 2008-04-08 64 val_64 2008-04-08
+66 val_66 2008-04-08 66 val_66 2008-04-08
+77 val_77 2008-04-08 77 val_77 2008-04-08
+8 val_8 2008-04-08 8 val_8 2008-04-08
+80 val_80 2008-04-08 80 val_80 2008-04-08
+82 val_82 2008-04-08 82 val_82 2008-04-08
+84 val_84 2008-04-08 84 val_84 2008-04-08
+84 val_84 2008-04-08 84 val_84 2008-04-08
+84 val_84 2008-04-08 84 val_84 2008-04-08
+84 val_84 2008-04-08 84 val_84 2008-04-08
+86 val_86 2008-04-08 86 val_86 2008-04-08
+95 val_95 2008-04-08 95 val_95 2008-04-08
+95 val_95 2008-04-08 95 val_95 2008-04-08
+95 val_95 2008-04-08 95 val_95 2008-04-08
+95 val_95 2008-04-08 95 val_95 2008-04-08
+97 val_97 2008-04-08 97 val_97 2008-04-08
+97 val_97 2008-04-08 97 val_97 2008-04-08
+97 val_97 2008-04-08 97 val_97 2008-04-08
+97 val_97 2008-04-08 97 val_97 2008-04-08
+PREHOOK: query: explain
+select count(*)
+from tab a left outer join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from tab a left outer join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ Statistics: Num rows: 550 Data size: 51700 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*)
+from tab a left outer join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from tab a left outer join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+480
+PREHOOK: query: explain
+select count (*)
+from tab a right outer join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count (*)
+from tab a right outer join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ Statistics: Num rows: 550 Data size: 51700 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count (*)
+from tab a right outer join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count (*)
+from tab a right outer join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+738
+PREHOOK: query: explain
+select count(*)
+from tab a full outer join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*)
+from tab a full outer join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Outer Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ Statistics: Num rows: 550 Data size: 51700 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*)
+from tab a full outer join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*)
+from tab a full outer join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+738
+PREHOOK: query: explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Map 6 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ filterExpr: (key is not null and value is not null) (type: boolean)
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key is not null and value is not null) (type: boolean)
+ Statistics: Num rows: 61 Data size: 5734 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 61 Data size: 5734 Basic stats: COMPLETE Column stats: NONE
+ value expressions: value (type: string)
+ Execution mode: vectorized, llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: b
+ filterExpr: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 23500 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 250 Data size: 23500 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: c
+ filterExpr: value is not null (type: boolean)
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col1
+ Statistics: Num rows: 275 Data size: 25850 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col1 (type: string)
+ Statistics: Num rows: 275 Data size: 25850 Basic stats: COMPLETE Column stats: NONE
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col1 (type: string)
+ 1 value (type: string)
+ Statistics: Num rows: 302 Data size: 28435 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 4
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+40
+PREHOOK: query: explain select count(*) from tab a join tab_part b on a.value = b.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from tab a join tab_part b on a.value = b.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ filterExpr: value is not null (type: boolean)
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 11374 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 121 Data size: 11374 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: b
+ filterExpr: value is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: value is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 23500 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: value (type: string)
+ sort order: +
+ Map-reduce partition columns: value (type: string)
+ Statistics: Num rows: 250 Data size: 23500 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 value (type: string)
+ 1 value (type: string)
+ Statistics: Num rows: 275 Data size: 25850 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: vectorized, uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from tab a join tab_part b on a.value = b.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from tab a join tab_part b on a.value = b.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+480
+PREHOOK: query: explain
+select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+UNION ALL
+select s2.key as key, s2.value as value from tab s2
+) a join tab_part b on (a.key = b.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+UNION ALL
+select s2.key as key, s2.value as value from tab s2
+) a join tab_part b on (a.key = b.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 7 <- Union 3 (CONTAINS)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE), Union 3 (CONTAINS)
+ Reducer 4 <- Map 8 (SIMPLE_EDGE), Union 3 (SIMPLE_EDGE)
+ Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ filterExpr: key is not null (type: boolean)
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 11374 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 11374 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: s3
+ filterExpr: key is not null (type: boolean)
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 11374 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 121 Data size: 11374 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Map 7
+ Map Operator Tree:
+ TableScan
+ alias: s2
+ filterExpr: key is not null (type: boolean)
+ Statistics: Num rows: 242 Data size: 22748 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 121 Data size: 11374 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 121 Data size: 11374 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 254 Data size: 23885 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: vectorized, llap
+ Map 8
+ Map Operator Tree:
+ TableScan
+ alias: b
+ filterExpr: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: ke
<TRUNCATED>
[22/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
new file mode 100644
index 0000000..1586f8a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
@@ -0,0 +1,1617 @@
+PREHOOK: query: -- Hybrid Grace Hash Join
+-- Test basic functionalities:
+-- 1. Various cases when hash partitions spill
+-- 2. Partitioned table spilling
+-- 3. Vectorization
+
+SELECT 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: -- Hybrid Grace Hash Join
+-- Test basic functionalities:
+-- 1. Various cases when hash partitions spill
+-- 2. Partitioned table spilling
+-- 3. Vectorization
+
+SELECT 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+1
+PREHOOK: query: -- Base result for inner join
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint
+ where c.cint < 2000000000) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Base result for inner join
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint
+ where c.cint < 2000000000) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (cint < 2000000000) (type: boolean)
+ Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 4505 Data size: 968719 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (cint < 2000000000) (type: boolean)
+ Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint
+ where c.cint < 2000000000) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint
+ where c.cint < 2000000000) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+3152013
+PREHOOK: query: -- Two partitions are created. One in memory, one on disk on creation.
+-- The one in memory will eventually exceed memory limit, but won't spill.
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint
+ where c.cint < 2000000000) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Two partitions are created. One in memory, one on disk on creation.
+-- The one in memory will eventually exceed memory limit, but won't spill.
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint
+ where c.cint < 2000000000) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (cint < 2000000000) (type: boolean)
+ Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 4505 Data size: 968719 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (cint < 2000000000) (type: boolean)
+ Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint
+ where c.cint < 2000000000) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint
+ where c.cint < 2000000000) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+3152013
+PREHOOK: query: -- Base result for inner join
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Base result for inner join
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: cint is not null (type: boolean)
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: cint is not null (type: boolean)
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+3152013
+PREHOOK: query: -- 16 partitions are created: 3 in memory, 13 on disk on creation.
+-- 1 partition is spilled during first round processing, which ends up having 2 in memory, 14 on disk
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 16 partitions are created: 3 in memory, 13 on disk on creation.
+-- 1 partition is spilled during first round processing, which ends up having 2 in memory, 14 on disk
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: cint is not null (type: boolean)
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: cint is not null (type: boolean)
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ inner join alltypesorc cd
+ on cd.cint = c.cint) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+3152013
+PREHOOK: query: -- Base result for outer join
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ left outer join alltypesorc cd
+ on cd.cint = c.cint) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Base result for outer join
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ left outer join alltypesorc cd
+ on cd.cint = c.cint) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 13516 Data size: 2906160 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ left outer join alltypesorc cd
+ on cd.cint = c.cint) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ left outer join alltypesorc cd
+ on cd.cint = c.cint) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+3155128
+PREHOOK: query: -- 32 partitions are created. 3 in memory, 29 on disk on creation.
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ left outer join alltypesorc cd
+ on cd.cint = c.cint) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 32 partitions are created. 3 in memory, 29 on disk on creation.
+explain
+select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ left outer join alltypesorc cd
+ on cd.cint = c.cint) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 13516 Data size: 2906160 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: cint (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ left outer join alltypesorc cd
+ on cd.cint = c.cint) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select c.ctinyint
+ from alltypesorc c
+ left outer join alltypesorc cd
+ on cd.cint = c.cint) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+3155128
+PREHOOK: query: -- Partitioned table
+create table parttbl (key string, value char(20)) partitioned by (dt char(10))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parttbl
+POSTHOOK: query: -- Partitioned table
+create table parttbl (key string, value char(20)) partitioned by (dt char(10))
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parttbl
+PREHOOK: query: insert overwrite table parttbl partition(dt='2000-01-01')
+ select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@parttbl@dt=2000-01-01
+POSTHOOK: query: insert overwrite table parttbl partition(dt='2000-01-01')
+ select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@parttbl@dt=2000-01-01
+POSTHOOK: Lineage: parttbl PARTITION(dt=2000-01-01).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: parttbl PARTITION(dt=2000-01-01).value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table parttbl partition(dt='2000-01-02')
+ select * from src1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+PREHOOK: Output: default@parttbl@dt=2000-01-02
+POSTHOOK: query: insert overwrite table parttbl partition(dt='2000-01-02')
+ select * from src1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: default@parttbl@dt=2000-01-02
+POSTHOOK: Lineage: parttbl PARTITION(dt=2000-01-02).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: parttbl PARTITION(dt=2000-01-02).value EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- No spill, base result
+explain
+select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- No spill, base result
+explain
+select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: p1
+ Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 289 Data size: 6872 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: p2
+ Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parttbl
+PREHOOK: Input: default@parttbl@dt=2000-01-01
+PREHOOK: Input: default@parttbl@dt=2000-01-02
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parttbl
+POSTHOOK: Input: default@parttbl@dt=2000-01-01
+POSTHOOK: Input: default@parttbl@dt=2000-01-02
+#### A masked pattern was here ####
+1217
+PREHOOK: query: -- No spill, 2 partitions created in memory
+explain
+select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- No spill, 2 partitions created in memory
+explain
+select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: p1
+ Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 289 Data size: 6872 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: p2
+ Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parttbl
+PREHOOK: Input: default@parttbl@dt=2000-01-01
+PREHOOK: Input: default@parttbl@dt=2000-01-02
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parttbl
+POSTHOOK: Input: default@parttbl@dt=2000-01-01
+POSTHOOK: Input: default@parttbl@dt=2000-01-02
+#### A masked pattern was here ####
+1217
+PREHOOK: query: -- Spill case base result
+explain
+select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Spill case base result
+explain
+select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: p1
+ Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 289 Data size: 6872 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: p2
+ Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parttbl
+PREHOOK: Input: default@parttbl@dt=2000-01-01
+PREHOOK: Input: default@parttbl@dt=2000-01-02
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parttbl
+POSTHOOK: Input: default@parttbl@dt=2000-01-01
+POSTHOOK: Input: default@parttbl@dt=2000-01-02
+#### A masked pattern was here ####
+1217
+PREHOOK: query: -- Spill case, one partition in memory, one spilled on creation
+explain
+select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Spill case, one partition in memory, one spilled on creation
+explain
+select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: p1
+ Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 289 Data size: 6872 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: p2
+ Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 263 Data size: 6248 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parttbl
+PREHOOK: Input: default@parttbl@dt=2000-01-01
+PREHOOK: Input: default@parttbl@dt=2000-01-02
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(select p1.value
+ from parttbl p1
+ inner join parttbl p2
+ on p1.key = p2.key) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parttbl
+POSTHOOK: Input: default@parttbl@dt=2000-01-01
+POSTHOOK: Input: default@parttbl@dt=2000-01-02
+#### A masked pattern was here ####
+1217
+PREHOOK: query: drop table parttbl
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@parttbl
+PREHOOK: Output: default@parttbl
+POSTHOOK: query: drop table parttbl
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@parttbl
+POSTHOOK: Output: default@parttbl
+PREHOOK: query: -- Test vectorization
+-- Test case borrowed from vector_decimal_mapjoin.q
+CREATE TABLE decimal_mapjoin STORED AS ORC AS
+ SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1,
+ CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2,
+ cint
+ FROM alltypesorc
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@decimal_mapjoin
+POSTHOOK: query: -- Test vectorization
+-- Test case borrowed from vector_decimal_mapjoin.q
+CREATE TABLE decimal_mapjoin STORED AS ORC AS
+ SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1,
+ CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2,
+ cint
+ FROM alltypesorc
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@decimal_mapjoin
+PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
+ FROM decimal_mapjoin l
+ JOIN decimal_mapjoin r ON l.cint = r.cint
+ WHERE l.cint = 6981
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
+ FROM decimal_mapjoin l
+ JOIN decimal_mapjoin r ON l.cint = r.cint
+ WHERE l.cint = 6981
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: l
+ Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (cint = 6981) (type: boolean)
+ Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 6981 (type: int)
+ 1 6981 (type: int)
+ outputColumnNames: _col1, _col9
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: 6981 (type: int), 6981 (type: int), _col1 (type: decimal(20,10)), _col9 (type: decimal(23,14))
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized, llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: r
+ Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (cint = 6981) (type: boolean)
+ Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: 6981 (type: int)
+ sort order: +
+ Map-reduce partition columns: 6981 (type: int)
+ Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
+ value expressions: cdecimal2 (type: decimal(23,14))
+ Execution mode: vectorized, llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
+ FROM decimal_mapjoin l
+ JOIN decimal_mapjoin r ON l.cint = r.cint
+ WHERE l.cint = 6981
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_mapjoin
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
+ FROM decimal_mapjoin l
+ JOIN decimal_mapjoin r ON l.cint = r.cint
+ WHERE l.cint = 6981
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_mapjoin
+#### A masked pattern was here ####
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 -617.5607769230769
+6981 6981 5831542.269248378 -617.5607769230769
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 6984454.211097692
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 -617.5607769230769
+6981 6981 -515.621072973 -617.5607769230769
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 6984454.211097692
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 -617.5607769230769
+6981 6981 -515.621072973 -617.5607769230769
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 6984454.211097692
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
+ FROM decimal_mapjoin l
+ JOIN decimal_mapjoin r ON l.cint = r.cint
+ WHERE l.cint = 6981
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
+ FROM decimal_mapjoin l
+ JOIN decimal_mapjoin r ON l.cint = r.cint
+ WHERE l.cint = 6981
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: l
+ Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (cint = 6981) (type: boolean)
+ Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 6981 (type: int)
+ 1 6981 (type: int)
+ outputColumnNames: _col1, _col9
+ input vertices:
+ 1 Map 2
+ Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: 6981 (type: int), 6981 (type: int), _col1 (type: decimal(20,10)), _col9 (type: decimal(23,14))
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 6758 Data size: 1190783 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Execution mode: vectorized, llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: r
+ Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (cint = 6981) (type: boolean)
+ Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: 6981 (type: int)
+ sort order: +
+ Map-reduce partition columns: 6981 (type: int)
+ Statistics: Num rows: 6144 Data size: 1082530 Basic stats: COMPLETE Column stats: NONE
+ value expressions: cdecimal2 (type: decimal(23,14))
+ Execution mode: vectorized, llap
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
+ FROM decimal_mapjoin l
+ JOIN decimal_mapjoin r ON l.cint = r.cint
+ WHERE l.cint = 6981
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_mapjoin
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
+ FROM decimal_mapjoin l
+ JOIN decimal_mapjoin r ON l.cint = r.cint
+ WHERE l.cint = 6981
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_mapjoin
+#### A masked pattern was here ####
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 -617.5607769230769
+6981 6981 5831542.269248378 -617.5607769230769
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 6984454.211097692
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 NULL
+6981 6981 5831542.269248378 NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL -617.5607769230769
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL 6984454.211097692
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 NULL NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 -617.5607769230769
+6981 6981 -515.621072973 -617.5607769230769
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 6984454.211097692
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 -617.5607769230769
+6981 6981 -515.621072973 -617.5607769230769
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 6984454.211097692
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+6981 6981 -515.621072973 NULL
+PREHOOK: query: DROP TABLE decimal_mapjoin
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@decimal_mapjoin
+PREHOOK: Output: default@decimal_mapjoin
+POSTHOOK: query: DROP TABLE decimal_mapjoin
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@decimal_mapjoin
+POSTHOOK: Output: default@decimal_mapjoin
[40/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out
new file mode 100644
index 0000000..5459914
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out
@@ -0,0 +1,3521 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl1
+PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl2
+PREHOOK: query: insert overwrite table tbl1
+select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl1
+POSTHOOK: query: insert overwrite table tbl1
+select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl1
+POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table tbl2
+select * from src where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tbl2
+POSTHOOK: query: insert overwrite table tbl2
+select * from src where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tbl2
+POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+22
+PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select key, count(*) from
+(
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select key, count(*) from
+(
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select key, count(*) from
+(
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select key, count(*) from
+(
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+0 9
+2 1
+4 1
+5 9
+8 1
+9 1
+PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from
+(
+ select key, count(*) from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1
+ group by key
+) subq2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from
+(
+ select key, count(*) from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1
+ group by key
+) subq2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 4 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+(
+ select key, count(*) from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1
+ group by key
+) subq2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+(
+ select key, count(*) from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1
+ group by key
+) subq2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+6
+PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them.
+-- Each sub-query should be converted to a sort-merge join.
+explain
+select src1.key, src1.cnt1, src2.cnt1 from
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1 group by key
+) src1
+join
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq2 group by key
+) src2
+on src1.key = src2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them.
+-- Each sub-query should be converted to a sort-merge join.
+explain
+select src1.key, src1.cnt1, src2.cnt1 from
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1 group by key
+) src1
+join
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq2 group by key
+) src2
+on src1.key = src2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Map 4 <- Map 6 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 5 (BROADCAST_EDGE)
+ Reducer 5 <- Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 6
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 6
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col1, _col3
+ input vertices:
+ 1 Reducer 5
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col0 (type: int), _col1 (type: bigint), _col3 (type: bigint)
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ Reducer 5
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1 group by key
+) src1
+join
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq2 group by key
+) src2
+on src1.key = src2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1 group by key
+) src1
+join
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq2 group by key
+) src2
+on src1.key = src2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+0 9 9
+2 1 1
+4 1 1
+5 9 9
+8 1 1
+9 1 1
+PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should
+-- be converted to a sort-merge join.
+explain
+select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should
+-- be converted to a sort-merge join.
+explain
+select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should
+-- be converted to a sort-merge join, although there is more than one level of sub-query
+explain
+select count(*) from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+ join tbl2 b
+ on subq2.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should
+-- be converted to a sort-merge join, although there is more than one level of sub-query
+explain
+select count(*) from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+ join tbl2 b
+ on subq2.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 key (type: int)
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+ join tbl2 b
+ on subq2.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+ join tbl2 b
+ on subq2.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query.
+-- The join should be converted to a sort-merge join
+explain
+select count(*) from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+ join
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq3
+ where key < 6
+ ) subq4
+ on subq2.key = subq4.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query.
+-- The join should be converted to a sort-merge join
+explain
+select count(*) from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+ join
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq3
+ where key < 6
+ ) subq4
+ on subq2.key = subq4.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+ join
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq3
+ where key < 6
+ ) subq4
+ on subq2.key = subq4.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+ join
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq3
+ where key < 6
+ ) subq4
+ on subq2.key = subq4.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key
+-- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one
+-- item, but that is not part of the join key.
+explain
+select count(*) from
+ (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1
+ join
+ (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+ on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key
+-- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one
+-- item, but that is not part of the join key.
+explain
+select count(*) from
+ (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1
+ join
+ (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+ on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 8) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 8) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+ (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1
+ join
+ (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+ on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+ (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1
+ join
+ (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2
+ on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized mapside
+-- join should be performed
+explain
+select count(*) from
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1
+ join
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+ on subq1.key = subq2.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized mapside
+-- join should be performed
+explain
+select count(*) from
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1
+ join
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+ on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: (key + 1) (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: _col0 is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: (key + 1) (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: _col0 is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1
+ join
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+ on subq1.key = subq2.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1
+ join
+ (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2
+ on subq1.key = subq2.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+22
+PREHOOK: query: -- The left table is a sub-query and the right table is not.
+-- It should be converted to a sort-merge join.
+explain
+select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join tbl2 a on subq1.key = a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The left table is a sub-query and the right table is not.
+-- It should be converted to a sort-merge join.
+explain
+select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join tbl2 a on subq1.key = a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 key (type: int)
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join tbl2 a on subq1.key = a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join tbl2 a on subq1.key = a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- The right table is a sub-query and the left table is not.
+-- It should be converted to a sort-merge join.
+explain
+select count(*) from tbl1 a
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq1
+ on a.key = subq1.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The right table is a sub-query and the left table is not.
+-- It should be converted to a sort-merge join.
+explain
+select count(*) from tbl1 a
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq1
+ on a.key = subq1.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 _col0 (type: int)
+ input vertices:
+ 1 Map 1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from tbl1 a
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq1
+ on a.key = subq1.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from tbl1 a
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq1
+ on a.key = subq1.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries.
+-- It should be converted to to a sort-merge join
+explain
+select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on (subq1.key = subq2.key)
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+ on (subq1.key = subq3.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries.
+-- It should be converted to to a sort-merge join
+explain
+select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on (subq1.key = subq2.key)
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+ on (subq1.key = subq3.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Inner Join 0 to 2
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ 2 _col0 (type: int)
+ input vertices:
+ 1 Map 3
+ 2 Map 4
+ Statistics: Num rows: 6 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (key < 6) (type: boolean)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+ on (subq1.key = subq3.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from
+ (select a.key as key, a.value as value from tbl1 a where key < 6) subq1
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq2
+ on subq1.key = subq2.key
+ join
+ (select a.key as key, a.value as value from tbl2 a where key < 6) subq3
+ on (subq1.key = subq3.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+56
+PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that.
+-- The join should be converted to a sort-merge join
+explain
+select count(*) from (
+ select subq2.key as key, subq2.value as value1, b.value as value2 from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that.
+-- The join should be converted to a sort-merge join
+explain
+select count(*) from (
+ select subq2.key as key, subq2.value as value1, b.value as value2 from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 key (type: int)
+ input vertices:
+ 0 Map 1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (
+ select subq2.key as key, subq2.value as value1, b.value as value2 from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (
+ select subq2.key as key, subq2.value as value1, b.value as value2 from
+ (
+ select * from
+ (
+ select a.key as key, a.value as value from tbl1 a where key < 8
+ ) subq1
+ where key < 6
+ ) subq2
+join tbl2 b
+on subq2.key = b.key) a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+20
+PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) from (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+22
+PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select key, count(*) from
+(
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join
+explain
+select key, count(*) from
+(
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: int)
+ sort order: +
+ Map-reduce partition columns: key (type: int)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: int)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select key, count(*) from
+(
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl1
+PREHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select key, count(*) from
+(
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+) subq1
+group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl1
+POSTHOOK: Input: default@tbl2
+#### A masked pattern was here ####
+0 9
+2 1
+4 1
+5 9
+8 1
+9 1
+PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from
+(
+ select key, count(*) from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1
+ group by key
+) subq2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join
+explain
+select count(*) from
+(
+ select key, count(*) from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1
+ group by key
+) subq2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 4 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 key (type: int)
+ 1 key (type: int)
+ outputColumnNames: _col0
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ keys: _col0 (type: int)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: int)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: int)
+ Statist
<TRUNCATED>
[42/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out
new file mode 100644
index 0000000..c237025
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out
@@ -0,0 +1,1200 @@
+PREHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 2 <- Map 1 (BROADCAST_EDGE)
+ Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [a]
+ /bucket_small/ds=2008-04-09 [a]
+ Map 2
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 54 Data size: 5500 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 1 => 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 0 Map 1
+ Position of Big Table: 1
+ Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [b]
+ /bucket_big/ds=2008-04-09 [b]
+ Reducer 3
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+76
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 54 Data size: 5500 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ /bucket_big/ds=2008-04-09 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ /bucket_small/ds=2008-04-09 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+76
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_JOIN
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_big
+ a
+ TOK_TABREF
+ TOK_TABNAME
+ bucket_small
+ b
+ =
+ .
+ TOK_TABLE_OR_COL
+ a
+ key
+ .
+ TOK_TABLE_OR_COL
+ b
+ key
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_FUNCTIONSTAR
+ count
+
+
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 3 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 54 Data size: 5500 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ Estimated key counts: Map 3 => 2
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ input vertices:
+ 1 Map 3
+ Position of Big Table: 0
+ Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Group By Operator
+ aggregations: count()
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ tag: -1
+ value expressions: _col0 (type: bigint)
+ auto parallelism: false
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ numFiles 2
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 2750
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 2
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_big
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_big { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_big
+ name: default.bucket_big
+ Truncated Path -> Alias:
+ /bucket_big/ds=2008-04-08 [a]
+ /bucket_big/ds=2008-04-09 [a]
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: b
+ Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ auto parallelism: true
+ Execution mode: llap
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-08
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+#### A masked pattern was here ####
+ Partition
+ base file name: ds=2008-04-09
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ numFiles 4
+ numRows 0
+ partition_columns ds
+ partition_columns.types string
+ rawDataSize 0
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 226
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ SORTBUCKETCOLSPREFIX TRUE
+ bucket_count 4
+ bucket_field_name key
+ columns key,value
+ columns.comments
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.bucket_small
+ partition_columns ds
+ partition_columns.types string
+ serialization.ddl struct bucket_small { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.bucket_small
+ name: default.bucket_small
+ Truncated Path -> Alias:
+ /bucket_small/ds=2008-04-08 [b]
+ /bucket_small/ds=2008-04-09 [b]
+ Reducer 2
+ Execution mode: uber
+ Needs Tagging: false
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ columns _col0
+ columns.types bigint
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+76
[33/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out b/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out
new file mode 100644
index 0000000..d474b7e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out
@@ -0,0 +1,3084 @@
+PREHOOK: query: -- This query has a GroupByOperator following JoinOperator and they share the same keys.
+-- When Correlation Optimizer is turned off, three MR jobs will be generated.
+-- When Correlation Optimizer is turned on, two MR jobs will be generated
+-- and JoinOperator (on the column of key) and GroupByOperator (also on the column
+-- of key) will be executed in the first MR job.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This query has a GroupByOperator following JoinOperator and they share the same keys.
+-- When Correlation Optimizer is turned off, three MR jobs will be generated.
+-- When Correlation Optimizer is turned on, two MR jobs will be generated
+-- and JoinOperator (on the column of key) and GroupByOperator (also on the column
+-- of key) will be executed in the first MR job.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+652447 37
+PREHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+652447 37
+PREHOOK: query: -- Enable hive.auto.convert.join.
+-- Correlation Optimizer will detect that the join will be converted to a Map-join,
+-- so it will not try to optimize this query.
+-- We should generate 1 MR job for subquery tmp.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Enable hive.auto.convert.join.
+-- Correlation Optimizer will detect that the join will be converted to a Map-join,
+-- so it will not try to optimize this query.
+-- We should generate 1 MR job for subquery tmp.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Map 1 <- Map 4 (BROADCAST_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col1
+ input vertices:
+ 1 Map 4
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ HybridGraceHashJoin: true
+ Select Operator
+ expressions: _col1 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+652447 37
+PREHOOK: query: -- If the key of a GroupByOperator is the left table's key in
+-- a Left Semi Join, these two operators will be executed in
+-- the same MR job when Correlation Optimizer is enabled.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: -- If the key of a GroupByOperator is the left table's key in
+-- a Left Semi Join, these two operators will be executed in
+-- the same MR job when Correlation Optimizer is enabled.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Left Semi Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+652447 15
+PREHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Left Semi Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+652447 15
+PREHOOK: query: -- If the key of a GroupByOperator is the left table's key in
+-- a Left Outer Join, these two operators will be executed in
+-- the same MR job when Correlation Optimizer is enabled.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: -- If the key of a GroupByOperator is the left table's key in
+-- a Left Outer Join, these two operators will be executed in
+-- the same MR job when Correlation Optimizer is enabled.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+652447 47
+PREHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+652447 47
+PREHOOK: query: -- If the key of a GroupByOperator is the right table's key in
+-- a Left Outer Join, we cannot use a single MR to execute these two
+-- operators because those keys with a null value are not grouped.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: -- If the key of a GroupByOperator is the right table's key in
+-- a Left Outer Join, we cannot use a single MR to execute these two
+-- operators because those keys with a null value are not grouped.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+652447 47
+PREHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+652447 47
+PREHOOK: query: -- If a column of the key of a GroupByOperator is the right table's key in
+-- a Left Outer Join, we cannot use a single MR to execute these two
+-- operators because those keys with a null value are not grouped.
+EXPLAIN
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- If a column of the key of a GroupByOperator is the right table's key in
+-- a Left Outer Join, we cannot use a single MR to execute these two
+-- operators because those keys with a null value are not grouped.
+EXPLAIN
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string), _col1 (type: string)
+ 1 _col0 (type: string), _col1 (type: string)
+ outputColumnNames: _col1, _col2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string), _col1 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col2 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string), KEY._col1 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+ NULL 10
+128 NULL 1
+146 val_146 2
+150 val_150 1
+213 val_213 2
+224 NULL 1
+238 val_238 2
+255 val_255 2
+273 val_273 3
+278 val_278 2
+311 val_311 3
+369 NULL 1
+401 val_401 5
+406 val_406 4
+66 val_66 1
+98 val_98 2
+PREHOOK: query: EXPLAIN
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string), _col1 (type: string)
+ 1 _col0 (type: string), _col1 (type: string)
+ outputColumnNames: _col1, _col2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col2 (type: string), _col1 (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string), _col1 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string), _col1 (type: string)
+ sort order: ++
+ Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col2 (type: bigint)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string), KEY._col1 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+ NULL 10
+128 NULL 1
+146 val_146 2
+150 val_150 1
+213 val_213 2
+224 NULL 1
+238 val_238 2
+255 val_255 2
+273 val_273 3
+278 val_278 2
+311 val_311 3
+369 NULL 1
+401 val_401 5
+406 val_406 4
+66 val_66 1
+98 val_98 2
+PREHOOK: query: -- If the key of a GroupByOperator is the right table's key in
+-- a Right Outer Join, these two operators will be executed in
+-- the same MR job when Correlation Optimizer is enabled.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: -- If the key of a GroupByOperator is the right table's key in
+-- a Right Outer Join, these two operators will be executed in
+-- the same MR job when Correlation Optimizer is enabled.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+12744278 500
+PREHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: y
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Map 5
+ Map Operator Tree:
+ TableScan
+ alias: x
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Left Outer Join0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 3
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: hash(_col0) (type: int), hash(_col1) (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: sum(_col0), sum(_col1)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: bigint), _col1 (type: bigint)
+ Reducer 4
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0), sum(VALUE._col1)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT y.key AS key, count(1) AS cnt
+ FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY y.key) tmp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####
+12744278 500
+PREHOOK: query: -- If the key of a GroupByOperator is the left table's key in
+-- a Right Outer Join, we cannot use a single MR to execute these two
+-- operators because those keys with a null value are not grouped.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+PREHOOK: type: QUERY
+POSTHOOK: query: -- If the key of a GroupByOperator is the left table's key in
+-- a Right Outer Join, we cannot use a single MR to execute these two
+-- operators because those keys with a null value are not grouped.
+EXPLAIN
+SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
+FROM (SELECT x.key AS key, count(1) AS cnt
+ FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key)
+ GROUP BY x.key) tmp
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+ Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
<TRUNCATED>
[23/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/having.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/having.q.out b/ql/src/test/results/clientpositive/llap/having.q.out
new file mode 100644
index 0000000..28a515d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/having.q.out
@@ -0,0 +1,1298 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: key, value
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(value)
+ keys: key (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: bigint)
+ outputColumnNames: _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (_col1 > 3) (type: boolean)
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col1 (type: bigint)
+ outputColumnNames: _col0
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+4
+4
+4
+4
+4
+4
+5
+5
+5
+5
+PREHOOK: query: EXPLAIN SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (UDFToDouble(key) <> 302.0) (type: boolean)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: max(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, max(value) AS c FROM src GROUP BY key HAVING key != 302
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0 val_0
+10 val_10
+100 val_100
+103 val_103
+104 val_104
+105 val_105
+11 val_11
+111 val_111
+113 val_113
+114 val_114
+116 val_116
+118 val_118
+119 val_119
+12 val_12
+120 val_120
+125 val_125
+126 val_126
+128 val_128
+129 val_129
+131 val_131
+133 val_133
+134 val_134
+136 val_136
+137 val_137
+138 val_138
+143 val_143
+145 val_145
+146 val_146
+149 val_149
+15 val_15
+150 val_150
+152 val_152
+153 val_153
+155 val_155
+156 val_156
+157 val_157
+158 val_158
+160 val_160
+162 val_162
+163 val_163
+164 val_164
+165 val_165
+166 val_166
+167 val_167
+168 val_168
+169 val_169
+17 val_17
+170 val_170
+172 val_172
+174 val_174
+175 val_175
+176 val_176
+177 val_177
+178 val_178
+179 val_179
+18 val_18
+180 val_180
+181 val_181
+183 val_183
+186 val_186
+187 val_187
+189 val_189
+19 val_19
+190 val_190
+191 val_191
+192 val_192
+193 val_193
+194 val_194
+195 val_195
+196 val_196
+197 val_197
+199 val_199
+2 val_2
+20 val_20
+200 val_200
+201 val_201
+202 val_202
+203 val_203
+205 val_205
+207 val_207
+208 val_208
+209 val_209
+213 val_213
+214 val_214
+216 val_216
+217 val_217
+218 val_218
+219 val_219
+221 val_221
+222 val_222
+223 val_223
+224 val_224
+226 val_226
+228 val_228
+229 val_229
+230 val_230
+233 val_233
+235 val_235
+237 val_237
+238 val_238
+239 val_239
+24 val_24
+241 val_241
+242 val_242
+244 val_244
+247 val_247
+248 val_248
+249 val_249
+252 val_252
+255 val_255
+256 val_256
+257 val_257
+258 val_258
+26 val_26
+260 val_260
+262 val_262
+263 val_263
+265 val_265
+266 val_266
+27 val_27
+272 val_272
+273 val_273
+274 val_274
+275 val_275
+277 val_277
+278 val_278
+28 val_28
+280 val_280
+281 val_281
+282 val_282
+283 val_283
+284 val_284
+285 val_285
+286 val_286
+287 val_287
+288 val_288
+289 val_289
+291 val_291
+292 val_292
+296 val_296
+298 val_298
+30 val_30
+305 val_305
+306 val_306
+307 val_307
+308 val_308
+309 val_309
+310 val_310
+311 val_311
+315 val_315
+316 val_316
+317 val_317
+318 val_318
+321 val_321
+322 val_322
+323 val_323
+325 val_325
+327 val_327
+33 val_33
+331 val_331
+332 val_332
+333 val_333
+335 val_335
+336 val_336
+338 val_338
+339 val_339
+34 val_34
+341 val_341
+342 val_342
+344 val_344
+345 val_345
+348 val_348
+35 val_35
+351 val_351
+353 val_353
+356 val_356
+360 val_360
+362 val_362
+364 val_364
+365 val_365
+366 val_366
+367 val_367
+368 val_368
+369 val_369
+37 val_37
+373 val_373
+374 val_374
+375 val_375
+377 val_377
+378 val_378
+379 val_379
+382 val_382
+384 val_384
+386 val_386
+389 val_389
+392 val_392
+393 val_393
+394 val_394
+395 val_395
+396 val_396
+397 val_397
+399 val_399
+4 val_4
+400 val_400
+401 val_401
+402 val_402
+403 val_403
+404 val_404
+406 val_406
+407 val_407
+409 val_409
+41 val_41
+411 val_411
+413 val_413
+414 val_414
+417 val_417
+418 val_418
+419 val_419
+42 val_42
+421 val_421
+424 val_424
+427 val_427
+429 val_429
+43 val_43
+430 val_430
+431 val_431
+432 val_432
+435 val_435
+436 val_436
+437 val_437
+438 val_438
+439 val_439
+44 val_44
+443 val_443
+444 val_444
+446 val_446
+448 val_448
+449 val_449
+452 val_452
+453 val_453
+454 val_454
+455 val_455
+457 val_457
+458 val_458
+459 val_459
+460 val_460
+462 val_462
+463 val_463
+466 val_466
+467 val_467
+468 val_468
+469 val_469
+47 val_47
+470 val_470
+472 val_472
+475 val_475
+477 val_477
+478 val_478
+479 val_479
+480 val_480
+481 val_481
+482 val_482
+483 val_483
+484 val_484
+485 val_485
+487 val_487
+489 val_489
+490 val_490
+491 val_491
+492 val_492
+493 val_493
+494 val_494
+495 val_495
+496 val_496
+497 val_497
+498 val_498
+5 val_5
+51 val_51
+53 val_53
+54 val_54
+57 val_57
+58 val_58
+64 val_64
+65 val_65
+66 val_66
+67 val_67
+69 val_69
+70 val_70
+72 val_72
+74 val_74
+76 val_76
+77 val_77
+78 val_78
+8 val_8
+80 val_80
+82 val_82
+83 val_83
+84 val_84
+85 val_85
+86 val_86
+87 val_87
+9 val_9
+90 val_90
+92 val_92
+95 val_95
+96 val_96
+97 val_97
+98 val_98
+PREHOOK: query: EXPLAIN SELECT key FROM src GROUP BY key HAVING max(value) > "val_255"
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT key FROM src GROUP BY key HAVING max(value) > "val_255"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: max(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (_col1 > 'val_255') (type: boolean)
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT key FROM src GROUP BY key HAVING max(value) > "val_255"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key FROM src GROUP BY key HAVING max(value) > "val_255"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+256
+257
+258
+26
+260
+262
+263
+265
+266
+27
+272
+273
+274
+275
+277
+278
+28
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+291
+292
+296
+298
+30
+302
+305
+306
+307
+308
+309
+310
+311
+315
+316
+317
+318
+321
+322
+323
+325
+327
+33
+331
+332
+333
+335
+336
+338
+339
+34
+341
+342
+344
+345
+348
+35
+351
+353
+356
+360
+362
+364
+365
+366
+367
+368
+369
+37
+373
+374
+375
+377
+378
+379
+382
+384
+386
+389
+392
+393
+394
+395
+396
+397
+399
+4
+400
+401
+402
+403
+404
+406
+407
+409
+41
+411
+413
+414
+417
+418
+419
+42
+421
+424
+427
+429
+43
+430
+431
+432
+435
+436
+437
+438
+439
+44
+443
+444
+446
+448
+449
+452
+453
+454
+455
+457
+458
+459
+460
+462
+463
+466
+467
+468
+469
+47
+470
+472
+475
+477
+478
+479
+480
+481
+482
+483
+484
+485
+487
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+5
+51
+53
+54
+57
+58
+64
+65
+66
+67
+69
+70
+72
+74
+76
+77
+78
+8
+80
+82
+83
+84
+85
+86
+87
+9
+90
+92
+95
+96
+97
+98
+PREHOOK: query: EXPLAIN SELECT key FROM src where key > 300 GROUP BY key HAVING max(value) > "val_255"
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT key FROM src where key > 300 GROUP BY key HAVING max(value) > "val_255"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (UDFToDouble(key) > 300.0) (type: boolean)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: max(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (_col1 > 'val_255') (type: boolean)
+ Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: _col0 (type: string)
+ outputColumnNames: _col0
+ Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT key FROM src where key > 300 GROUP BY key HAVING max(value) > "val_255"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key FROM src where key > 300 GROUP BY key HAVING max(value) > "val_255"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+302
+305
+306
+307
+308
+309
+310
+311
+315
+316
+317
+318
+321
+322
+323
+325
+327
+331
+332
+333
+335
+336
+338
+339
+341
+342
+344
+345
+348
+351
+353
+356
+360
+362
+364
+365
+366
+367
+368
+369
+373
+374
+375
+377
+378
+379
+382
+384
+386
+389
+392
+393
+394
+395
+396
+397
+399
+400
+401
+402
+403
+404
+406
+407
+409
+411
+413
+414
+417
+418
+419
+421
+424
+427
+429
+430
+431
+432
+435
+436
+437
+438
+439
+443
+444
+446
+448
+449
+452
+453
+454
+455
+457
+458
+459
+460
+462
+463
+466
+467
+468
+469
+470
+472
+475
+477
+478
+479
+480
+481
+482
+483
+484
+485
+487
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+PREHOOK: query: EXPLAIN SELECT key, max(value) FROM src GROUP BY key HAVING max(value) > "val_255"
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT key, max(value) FROM src GROUP BY key HAVING max(value) > "val_255"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: max(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: max(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (_col1 > 'val_255') (type: boolean)
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT key, max(value) FROM src GROUP BY key HAVING max(value) > "val_255"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, max(value) FROM src GROUP BY key HAVING max(value) > "val_255"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+256 val_256
+257 val_257
+258 val_258
+26 val_26
+260 val_260
+262 val_262
+263 val_263
+265 val_265
+266 val_266
+27 val_27
+272 val_272
+273 val_273
+274 val_274
+275 val_275
+277 val_277
+278 val_278
+28 val_28
+280 val_280
+281 val_281
+282 val_282
+283 val_283
+284 val_284
+285 val_285
+286 val_286
+287 val_287
+288 val_288
+289 val_289
+291 val_291
+292 val_292
+296 val_296
+298 val_298
+30 val_30
+302 val_302
+305 val_305
+306 val_306
+307 val_307
+308 val_308
+309 val_309
+310 val_310
+311 val_311
+315 val_315
+316 val_316
+317 val_317
+318 val_318
+321 val_321
+322 val_322
+323 val_323
+325 val_325
+327 val_327
+33 val_33
+331 val_331
+332 val_332
+333 val_333
+335 val_335
+336 val_336
+338 val_338
+339 val_339
+34 val_34
+341 val_341
+342 val_342
+344 val_344
+345 val_345
+348 val_348
+35 val_35
+351 val_351
+353 val_353
+356 val_356
+360 val_360
+362 val_362
+364 val_364
+365 val_365
+366 val_366
+367 val_367
+368 val_368
+369 val_369
+37 val_37
+373 val_373
+374 val_374
+375 val_375
+377 val_377
+378 val_378
+379 val_379
+382 val_382
+384 val_384
+386 val_386
+389 val_389
+392 val_392
+393 val_393
+394 val_394
+395 val_395
+396 val_396
+397 val_397
+399 val_399
+4 val_4
+400 val_400
+401 val_401
+402 val_402
+403 val_403
+404 val_404
+406 val_406
+407 val_407
+409 val_409
+41 val_41
+411 val_411
+413 val_413
+414 val_414
+417 val_417
+418 val_418
+419 val_419
+42 val_42
+421 val_421
+424 val_424
+427 val_427
+429 val_429
+43 val_43
+430 val_430
+431 val_431
+432 val_432
+435 val_435
+436 val_436
+437 val_437
+438 val_438
+439 val_439
+44 val_44
+443 val_443
+444 val_444
+446 val_446
+448 val_448
+449 val_449
+452 val_452
+453 val_453
+454 val_454
+455 val_455
+457 val_457
+458 val_458
+459 val_459
+460 val_460
+462 val_462
+463 val_463
+466 val_466
+467 val_467
+468 val_468
+469 val_469
+47 val_47
+470 val_470
+472 val_472
+475 val_475
+477 val_477
+478 val_478
+479 val_479
+480 val_480
+481 val_481
+482 val_482
+483 val_483
+484 val_484
+485 val_485
+487 val_487
+489 val_489
+490 val_490
+491 val_491
+492 val_492
+493 val_493
+494 val_494
+495 val_495
+496 val_496
+497 val_497
+498 val_498
+5 val_5
+51 val_51
+53 val_53
+54 val_54
+57 val_57
+58 val_58
+64 val_64
+65 val_65
+66 val_66
+67 val_67
+69 val_69
+70 val_70
+72 val_72
+74 val_74
+76 val_76
+77 val_77
+78 val_78
+8 val_8
+80 val_80
+82 val_82
+83 val_83
+84 val_84
+85 val_85
+86 val_86
+87 val_87
+9 val_9
+90 val_90
+92 val_92
+95 val_95
+96 val_96
+97 val_97
+98 val_98
+PREHOOK: query: EXPLAIN SELECT key, COUNT(value) FROM src GROUP BY key HAVING count(value) >= 4
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT key, COUNT(value) FROM src GROUP BY key HAVING count(value) >= 4
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (_col1 >= 4) (type: boolean)
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: SELECT key, COUNT(value) FROM src GROUP BY key HAVING count(value) >= 4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT key, COUNT(value) FROM src GROUP BY key HAVING count(value) >= 4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+138 4
+169 4
+230 5
+277 4
+348 5
+401 5
+406 4
+468 4
+469 5
+489 4
[07/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge6.q.out b/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
new file mode 100644
index 0000000..847b22d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge6.q.out
@@ -0,0 +1,518 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc file merge tests for static partitions
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc file merge tests for static partitions
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5a
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ filterExpr: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ hour 24
+ year 2000
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 3 files total
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: -- 3 files total
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+Found 3 items
+#### A masked pattern was here ####
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+year=2000/hour=24
+year=2001/hour=24
+PREHOOK: query: select * from orc_merge5a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@year=2000/hour=24
+PREHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 2000 24
+13 bar 80.0 2 1969-12-31 16:00:05 2001 24
+2 foo 0.8 1 1969-12-31 16:00:00 2000 24
+2 foo 0.8 1 1969-12-31 16:00:00 2001 24
+5 eat 0.8 6 1969-12-31 16:00:20 2000 24
+5 eat 0.8 6 1969-12-31 16:00:20 2001 24
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
+ Stage-5
+ Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4
+ Stage-6
+ Stage-7 depends on stages: Stage-6
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ filterExpr: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ predicate: (userid <= 13) (type: boolean)
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+ Execution mode: llap
+
+ Stage: Stage-8
+ Conditional Operator
+
+ Stage: Stage-5
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ hour 24
+ year 2000
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-4
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ File Merge
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ File Merge
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-7
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+Found 1 items
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+year=2000/hour=24
+year=2001/hour=24
+PREHOOK: query: select * from orc_merge5a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@year=2000/hour=24
+PREHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 2000 24
+13 bar 80.0 2 1969-12-31 16:00:05 2001 24
+2 foo 0.8 1 1969-12-31 16:00:00 2000 24
+2 foo 0.8 1 1969-12-31 16:00:00 2001 24
+5 eat 0.8 6 1969-12-31 16:00:20 2000 24
+5 eat 0.8 6 1969-12-31 16:00:20 2001 24
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2000,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+Found 3 items
+#### A masked pattern was here ####
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+year=2000/hour=24
+year=2001/hour=24
+PREHOOK: query: select * from orc_merge5a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@year=2000/hour=24
+PREHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 2000 24
+13 bar 80.0 2 1969-12-31 16:00:05 2001 24
+2 foo 0.8 1 1969-12-31 16:00:00 2000 24
+2 foo 0.8 1 1969-12-31 16:00:00 2001 24
+5 eat 0.8 6 1969-12-31 16:00:20 2000 24
+5 eat 0.8 6 1969-12-31 16:00:20 2001 24
+PREHOOK: query: explain alter table orc_merge5a partition(year="2000",hour=24) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: query: explain alter table orc_merge5a partition(year="2000",hour=24) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+ Stage-1 depends on stages: Stage-0
+ Stage-2 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-0
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ partition:
+ hour 24
+ year 2000
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: alter table orc_merge5a partition(year="2000",hour=24) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: alter table orc_merge5a partition(year="2000",hour=24) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+PREHOOK: query: alter table orc_merge5a partition(year="2001",hour=24) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: alter table orc_merge5a partition(year="2001",hour=24) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24
+PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@year=2001/hour=24
+POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24
+Found 1 items
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+year=2000/hour=24
+year=2001/hour=24
+PREHOOK: query: select * from orc_merge5a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@year=2000/hour=24
+PREHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@year=2000/hour=24
+POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 2000 24
+13 bar 80.0 2 1969-12-31 16:00:05 2001 24
+2 foo 0.8 1 1969-12-31 16:00:00 2000 24
+2 foo 0.8 1 1969-12-31 16:00:00 2001 24
+5 eat 0.8 6 1969-12-31 16:00:20 2000 24
+5 eat 0.8 6 1969-12-31 16:00:20 2001 24
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge7.q.out b/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
new file mode 100644
index 0000000..eee65c0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge7.q.out
@@ -0,0 +1,629 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc merge file tests for dynamic partition case
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- orc merge file tests for dynamic partition case
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5a
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+ Execution mode: llap
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ st
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 3 files total
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: -- 3 files total
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 1 items
+#### A masked pattern was here ####
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+PREHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+POSTHOOK: query: -- 3 mappers
+explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
+ Stage-5
+ Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4
+ Stage-6
+ Stage-7 depends on stages: Stage-6
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: orc_merge5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+ Execution mode: llap
+
+ Stage: Stage-8
+ Conditional Operator
+
+ Stage: Stage-5
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ partition:
+ st
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-3
+ Stats-Aggr Operator
+
+ Stage: Stage-4
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ File Merge
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-6
+ Tez
+#### A masked pattern was here ####
+ Vertices:
+ File Merge
+ Merge File Operator
+ Map Operator Tree:
+ ORC File Merge Operator
+ merge level: stripe
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+
+ Stage: Stage-7
+ Move Operator
+ files:
+ hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 1 items
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5a
+POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: Output: default@orc_merge5a@st=1.8
+POSTHOOK: Output: default@orc_merge5a@st=8.0
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ]
+PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 1 items
+#### A masked pattern was here ####
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
+PREHOOK: query: explain alter table orc_merge5a partition(st=80.0) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: query: explain alter table orc_merge5a partition(st=80.0) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+ Stage-1 depends on stages: Stage-0
+ Stage-2 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-0
+
+ Stage: Stage-1
+ Move Operator
+ tables:
+ partition:
+ st 80.0
+ replace: true
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.orc_merge5a
+
+ Stage: Stage-2
+ Stats-Aggr Operator
+
+PREHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+PREHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=80.0
+POSTHOOK: query: -- 1 file after merging
+analyze table orc_merge5a partition(st=80.0) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=80.0
+PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a
+PREHOOK: Output: default@orc_merge5a@st=0.8
+POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a
+POSTHOOK: Output: default@orc_merge5a@st=0.8
+Found 1 items
+#### A masked pattern was here ####
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: show partitions orc_merge5a
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@orc_merge5a
+POSTHOOK: query: show partitions orc_merge5a
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@orc_merge5a
+st=0.8
+st=1.8
+st=8.0
+st=80.0
+PREHOOK: query: select * from orc_merge5a where userid<=13
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_merge5a
+PREHOOK: Input: default@orc_merge5a@st=0.8
+PREHOOK: Input: default@orc_merge5a@st=1.8
+PREHOOK: Input: default@orc_merge5a@st=8.0
+PREHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+POSTHOOK: query: select * from orc_merge5a where userid<=13
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_merge5a
+POSTHOOK: Input: default@orc_merge5a@st=0.8
+POSTHOOK: Input: default@orc_merge5a@st=1.8
+POSTHOOK: Input: default@orc_merge5a@st=8.0
+POSTHOOK: Input: default@orc_merge5a@st=80.0
+#### A masked pattern was here ####
+13 bar 80.0 2 1969-12-31 16:00:05 80.0
+2 foo 0.8 1 1969-12-31 16:00:00 0.8
+5 eat 0.8 6 1969-12-31 16:00:20 0.8
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge8.q.out b/ql/src/test/results/clientpositive/llap/orc_merge8.q.out
new file mode 100644
index 0000000..f4f4b4a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge8.q.out
@@ -0,0 +1,130 @@
+PREHOOK: query: create table if not exists alltypes (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) row format delimited fields terminated by '|'
+collection items terminated by ','
+map keys terminated by ':' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: create table if not exists alltypes (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) row format delimited fields terminated by '|'
+collection items terminated by ','
+map keys terminated by ':' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltypes
+PREHOOK: query: create table alltypes_orc like alltypes
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: create table alltypes_orc like alltypes
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: alter table alltypes_orc set fileformat orc
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc set fileformat orc
+POSTHOOK: type: ALTERTABLE_FILEFORMAT
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alltypes
+PREHOOK: query: insert overwrite table alltypes_orc select * from alltypes
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: insert overwrite table alltypes_orc select * from alltypes
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes
+POSTHOOK: Output: default@alltypes_orc
+POSTHOOK: Lineage: alltypes_orc.bi SIMPLE [(alltypes)alltypes.FieldSchema(name:bi, type:bigint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.bo SIMPLE [(alltypes)alltypes.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.c SIMPLE [(alltypes)alltypes.FieldSchema(name:c, type:char(5), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.d SIMPLE [(alltypes)alltypes.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.da SIMPLE [(alltypes)alltypes.FieldSchema(name:da, type:date, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.de SIMPLE [(alltypes)alltypes.FieldSchema(name:de, type:decimal(10,3), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.f SIMPLE [(alltypes)alltypes.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.i SIMPLE [(alltypes)alltypes.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.l SIMPLE [(alltypes)alltypes.FieldSchema(name:l, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.m SIMPLE [(alltypes)alltypes.FieldSchema(name:m, type:map<string,string>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.s SIMPLE [(alltypes)alltypes.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.si SIMPLE [(alltypes)alltypes.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.st SIMPLE [(alltypes)alltypes.FieldSchema(name:st, type:struct<c1:int,c2:string>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.ti SIMPLE [(alltypes)alltypes.FieldSchema(name:ti, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.ts SIMPLE [(alltypes)alltypes.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.vc SIMPLE [(alltypes)alltypes.FieldSchema(name:vc, type:varchar(5), comment:null), ]
+PREHOOK: query: insert into table alltypes_orc select * from alltypes
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: insert into table alltypes_orc select * from alltypes
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes
+POSTHOOK: Output: default@alltypes_orc
+POSTHOOK: Lineage: alltypes_orc.bi SIMPLE [(alltypes)alltypes.FieldSchema(name:bi, type:bigint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.bo SIMPLE [(alltypes)alltypes.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.c SIMPLE [(alltypes)alltypes.FieldSchema(name:c, type:char(5), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.d SIMPLE [(alltypes)alltypes.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.da SIMPLE [(alltypes)alltypes.FieldSchema(name:da, type:date, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.de SIMPLE [(alltypes)alltypes.FieldSchema(name:de, type:decimal(10,3), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.f SIMPLE [(alltypes)alltypes.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.i SIMPLE [(alltypes)alltypes.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.l SIMPLE [(alltypes)alltypes.FieldSchema(name:l, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.m SIMPLE [(alltypes)alltypes.FieldSchema(name:m, type:map<string,string>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.s SIMPLE [(alltypes)alltypes.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.si SIMPLE [(alltypes)alltypes.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.st SIMPLE [(alltypes)alltypes.FieldSchema(name:st, type:struct<c1:int,c2:string>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.ti SIMPLE [(alltypes)alltypes.FieldSchema(name:ti, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.ts SIMPLE [(alltypes)alltypes.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.vc SIMPLE [(alltypes)alltypes.FieldSchema(name:vc, type:varchar(5), comment:null), ]
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: alter table alltypes_orc concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+Found 1 items
+#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/orc_merge9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_merge9.q.out b/ql/src/test/results/clientpositive/llap/orc_merge9.q.out
new file mode 100644
index 0000000..bdf0fd3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_merge9.q.out
@@ -0,0 +1,186 @@
+PREHOOK: query: create table ts_merge (
+userid bigint,
+string1 string,
+subtype double,
+decimal1 decimal(38,18),
+ts timestamp
+) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: create table ts_merge (
+userid bigint,
+string1 string,
+subtype double,
+decimal1 decimal(38,18),
+ts timestamp
+) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ts_merge
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' overwrite into table ts_merge
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' overwrite into table ts_merge
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@ts_merge
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table ts_merge
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table ts_merge
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@ts_merge
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from ts_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from ts_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+50000
+PREHOOK: query: alter table ts_merge concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@ts_merge
+PREHOOK: Output: default@ts_merge
+POSTHOOK: query: alter table ts_merge concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@ts_merge
+POSTHOOK: Output: default@ts_merge
+PREHOOK: query: select count(*) from ts_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from ts_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@ts_merge
+#### A masked pattern was here ####
+50000
+Found 1 items
+#### A masked pattern was here ####
+PREHOOK: query: -- incompatible merge test (stripe statistics missing)
+
+create table a_merge like alltypesorc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: -- incompatible merge test (stripe statistics missing)
+
+create table a_merge like alltypesorc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@a_merge
+PREHOOK: query: insert overwrite table a_merge select * from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: insert overwrite table a_merge select * from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@a_merge
+POSTHOOK: Lineage: a_merge.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: a_merge.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: a_merge.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: a_merge.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table a_merge
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table a_merge
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@a_merge
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+24576
+PREHOOK: query: alter table a_merge concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@a_merge
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: alter table a_merge concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@a_merge
+POSTHOOK: Output: default@a_merge
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+24576
+Found 2 items
+#### A masked pattern was here ####
+PREHOOK: query: insert into table a_merge select * from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: insert into table a_merge select * from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@a_merge
+POSTHOOK: Lineage: a_merge.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: a_merge.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: a_merge.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: a_merge.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: a_merge.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: a_merge.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+Found 3 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+36864
+PREHOOK: query: alter table a_merge concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@a_merge
+PREHOOK: Output: default@a_merge
+POSTHOOK: query: alter table a_merge concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@a_merge
+POSTHOOK: Output: default@a_merge
+PREHOOK: query: select count(*) from a_merge
+PREHOOK: type: QUERY
+PREHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from a_merge
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@a_merge
+#### A masked pattern was here ####
+36864
+Found 2 items
+#### A masked pattern was here ####
[17/51] [partial] hive git commit: HIVE-11776: LLAP: Generate golden
files for all MiniLlapCluster tests (Prasanth Jayachandran)
Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ace87818/ql/src/test/results/clientpositive/llap/llapdecider.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llapdecider.q.out b/ql/src/test/results/clientpositive/llap/llapdecider.q.out
new file mode 100644
index 0000000..6aa5513
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/llapdecider.q.out
@@ -0,0 +1,1195 @@
+PREHOOK: query: -- simple query with multiple reduce stages
+EXPLAIN SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt
+PREHOOK: type: QUERY
+POSTHOOK: query: -- simple query with multiple reduce stages
+EXPLAIN SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: bigint)
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col1 (type: bigint)
+ sort order: +
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: string)
+ Reducer 3
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: create table src_orc stored as orc as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc stored as orc as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src_orc
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: bigint)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Reducer 3
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src_orc
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: _col1 (type: bigint)
+ sort order: +
+ Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
+ value expressions: _col0 (type: string)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 250 Data size: 44000 Basic stats: COMPLETE Column stats: NONE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: analyze table src_orc compute statistics for columns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table src_orc compute statistics for columns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src_orc
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: count(_col1)
+ keys: _col0 (type: string)
+ mode: hash
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: count(VALUE._col0)
+ keys: KEY._col0 (type: string)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col1 (type: bigint)
+ sort order: +
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: string)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT * from src_orc join src on (src_orc.key = src.key) order by src.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * from src_orc join src on (src_orc.key = src.key) order by src.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src_orc
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Reducer 2
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col3 (type: string)
+ sort order: +
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ Reducer 3
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col3 (type: string)
+ sort order: +
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT * from src_orc join src on (src_orc.key = src.key) order by src.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * from src_orc join src on (src_orc.key = src.key) order by src.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src_orc
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: src
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col3 (type: string)
+ sort order: +
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Reducer 2
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col3 (type: string)
+ sort order: +
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ Reducer 3
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Reducer 2
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col3 (type: string)
+ sort order: +
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ Reducer 3
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col3 (type: string)
+ sort order: +
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ Reducer 3
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Reducer 2
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col3 (type: string)
+ sort order: +
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ Reducer 3
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by s2.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Map 4
+ Map Operator Tree:
+ TableScan
+ alias: s1
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
+ Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: llap
+ Reduce Operator Tree:
+ Merge Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: string)
+ 1 _col0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col3 (type: string)
+ sort order: +
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+ Reducer 3
+ Execution mode: uber
+ Reduce Operator Tree:
+ Select Operator
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: string)
+ outputColumnNames: _col0, _col1, _col2, _col3
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: CREATE TEMPORARY FUNCTION test_udf_get_java_string AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaString'
+PREHOOK: type: CREATEFUNCTION
+PREHOOK: Output: test_udf_get_java_string
+POSTHOOK: query: CREATE TEMPORARY FUNCTION test_udf_get_java_string AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaString'
+POSTHOOK: type: CREATEFUNCTION
+POSTHOOK: Output: test_udf_get_java_string
+PREHOOK: query: EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc where cast(key as int) > 1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc where cast(key as int) > 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src_orc
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: (UDFToInteger(key) > 1) (type: boolean)
+ Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: (UDFToInteger(key) + 1) (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: sum(_col0)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Execution mode: llap
+ Reducer 2
+ Execution mode: uber
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT sum(cast(test_udf_get_java_string(cast(key as string)) as int) + 1) from src_orc where cast(key as int) > 1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT sum(cast(test_udf_get_java_string(cast(key as string)) as int) + 1) from src_orc where cast(key as int) > 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src_orc
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: (UDFToInteger(key) > 1) (type: boolean)
+ Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: (UDFToInteger(GenericUDFTestGetJavaString(key)) + 1) (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: sum(_col0)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc where cast(test_udf_get_java_string(cast(key as string)) as int) > 1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc where cast(test_udf_get_java_string(cast(key as string)) as int) > 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+ Edges:
+ Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: src_orc
+ Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: (UDFToInteger(GenericUDFTestGetJavaString(key)) > 1) (type: boolean)
+ Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: (UDFToInteger(key) + 1) (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 166 Data size: 14442 Basic stats: COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: sum(_col0)
+ mode: hash
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ sort order:
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: bigint)
+ Reducer 2
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: sum(VALUE._col0)
+ mode: mergepartial
+ outputColumnNames: _col0
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+ table:
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+