Posted to commits@hive.apache.org by sp...@apache.org on 2016/05/27 15:38:01 UTC
[40/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)
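
This commit deletes the JDK-specific golden files (the *.q.java1.7.out variants) and drops the -- JAVA_VERSION_SPECIFIC_OUTPUT marker from the affected queries, leaving a single *.q.out per test. The per-JDK copies existed because some query output depends on JVM hash-iteration order, so the same query could print map entries or JSON keys in a different order on JDK 7 than on JDK 8. A minimal illustration, reusing the str_to_map query from the char_udf1.q output below (only the JDK 7 result line is taken from the golden file; the ordering comments are the point):

select str_to_map('a:1,b:2,c:3', ',', ':');
-- JDK 7 run (from the golden file below): {"b":"2","a":"1","c":"3"}
-- The entry order is HashMap iteration order, a JVM implementation detail,
-- not insertion order -- which is why the expected output needed a per-JDK copy.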
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out
deleted file mode 100644
index 5c40dc4..0000000
--- a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out
+++ /dev/null
@@ -1,693 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM
- src a
- FULL OUTER JOIN
- srcpart b
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM
- src a
- FULL OUTER JOIN
- srcpart b
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: a
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- GatherStats: false
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: key, value
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: key (type: string)
- null sort order: a
- sort order: +
- Map-reduce partition columns: key (type: string)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- tag: 0
- value expressions: value (type: string)
- auto parallelism: false
- TableScan
- alias: b
- Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
- GatherStats: false
- Select Operator
- expressions: key (type: string), value (type: string), ds (type: string)
- outputColumnNames: key, value, ds
- Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: key (type: string)
- null sort order: a
- sort order: +
- Map-reduce partition columns: key (type: string)
- Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
- tag: 1
- value expressions: value (type: string), ds (type: string)
- auto parallelism: false
- Path -> Alias:
-#### A masked pattern was here ####
- Path -> Partition:
-#### A masked pattern was here ####
- Partition
- base file name: src
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.src
- numFiles 1
- numRows 500
- rawDataSize 5312
- serialization.ddl struct src { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 5812
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.src
- numFiles 1
- numRows 500
- rawDataSize 5312
- serialization.ddl struct src { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 5812
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src
- name: default.src
-#### A masked pattern was here ####
- Partition
- base file name: hr=11
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- partition values:
- ds 2008-04-08
- hr 11
- properties:
- COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- numFiles 1
- numRows 500
- partition_columns ds/hr
- partition_columns.types string:string
- rawDataSize 5312
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 5812
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- partition_columns ds/hr
- partition_columns.types string:string
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcpart
- name: default.srcpart
-#### A masked pattern was here ####
- Partition
- base file name: hr=12
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- partition values:
- ds 2008-04-08
- hr 12
- properties:
- COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- numFiles 1
- numRows 500
- partition_columns ds/hr
- partition_columns.types string:string
- rawDataSize 5312
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 5812
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- partition_columns ds/hr
- partition_columns.types string:string
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcpart
- name: default.srcpart
-#### A masked pattern was here ####
- Partition
- base file name: hr=11
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- partition values:
- ds 2008-04-09
- hr 11
- properties:
- COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- numFiles 1
- numRows 500
- partition_columns ds/hr
- partition_columns.types string:string
- rawDataSize 5312
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 5812
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- partition_columns ds/hr
- partition_columns.types string:string
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcpart
- name: default.srcpart
-#### A masked pattern was here ####
- Partition
- base file name: hr=12
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- partition values:
- ds 2008-04-09
- hr 12
- properties:
- COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- numFiles 1
- numRows 500
- partition_columns ds/hr
- partition_columns.types string:string
- rawDataSize 5312
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 5812
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- partition_columns ds/hr
- partition_columns.types string:string
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcpart
- name: default.srcpart
- Truncated Path -> Alias:
- /src [a]
- /srcpart/ds=2008-04-08/hr=11 [b]
- /srcpart/ds=2008-04-08/hr=12 [b]
- /srcpart/ds=2008-04-09/hr=11 [b]
- /srcpart/ds=2008-04-09/hr=12 [b]
- Needs Tagging: true
- Reduce Operator Tree:
- Join Operator
- condition map:
- Outer Join 0 to 1
- filter mappings:
- 1 [0, 1]
- filter predicates:
- 0
- 1 {(VALUE.ds = '2008-04-08')}
- keys:
- 0 key (type: string)
- 1 key (type: string)
- outputColumnNames: key, value, key0, value0
- Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
- outputColumnNames: key, value, key0, value0
- Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- isSamplingPred: false
- predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key0) > 15.0) and (UDFToDouble(key0) < 25.0)) (type: boolean)
- Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- GlobalTableId: 0
-#### A masked pattern was here ####
- NumFilesPerFileSink: 1
- Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- properties:
- columns key,value,key0,value0
- columns.types string:string:string:string
- escape.delim \
- hive.serialization.extend.additional.nesting.levels true
- serialization.escape.crlf true
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- TotalFiles: 1
- GatherStats: false
- MultiFileSpray: false
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: FROM
- src a
- FULL OUTER JOIN
- srcpart b
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM
- src a
- FULL OUTER JOIN
- srcpart b
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-17 val_17 17 val_17
-17 val_17 17 val_17
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-19 val_19 19 val_19
-19 val_19 19 val_19
-PREHOOK: query: EXPLAIN EXTENDED
- FROM
- src a
- FULL OUTER JOIN
- srcpart b
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
- FROM
- src a
- FULL OUTER JOIN
- srcpart b
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: a
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- GatherStats: false
- Filter Operator
- isSamplingPred: false
- predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: key, value
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: key (type: string)
- null sort order: a
- sort order: +
- Map-reduce partition columns: key (type: string)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- tag: 0
- value expressions: value (type: string)
- auto parallelism: false
- TableScan
- alias: b
- Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
- GatherStats: false
- Filter Operator
- isSamplingPred: false
- predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
- Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: key, value
- Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: key (type: string)
- null sort order: a
- sort order: +
- Map-reduce partition columns: key (type: string)
- Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
- tag: 1
- value expressions: value (type: string)
- auto parallelism: false
- Path -> Alias:
-#### A masked pattern was here ####
- Path -> Partition:
-#### A masked pattern was here ####
- Partition
- base file name: src
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.src
- numFiles 1
- numRows 500
- rawDataSize 5312
- serialization.ddl struct src { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 5812
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.src
- numFiles 1
- numRows 500
- rawDataSize 5312
- serialization.ddl struct src { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 5812
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.src
- name: default.src
-#### A masked pattern was here ####
- Partition
- base file name: hr=11
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- partition values:
- ds 2008-04-08
- hr 11
- properties:
- COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- numFiles 1
- numRows 500
- partition_columns ds/hr
- partition_columns.types string:string
- rawDataSize 5312
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 5812
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- partition_columns ds/hr
- partition_columns.types string:string
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcpart
- name: default.srcpart
-#### A masked pattern was here ####
- Partition
- base file name: hr=12
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- partition values:
- ds 2008-04-08
- hr 12
- properties:
- COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- numFiles 1
- numRows 500
- partition_columns ds/hr
- partition_columns.types string:string
- rawDataSize 5312
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- totalSize 5812
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- properties:
- bucket_count -1
- columns key,value
- columns.comments 'default','default'
- columns.types string:string
-#### A masked pattern was here ####
- name default.srcpart
- partition_columns ds/hr
- partition_columns.types string:string
- serialization.ddl struct srcpart { string key, string value}
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.srcpart
- name: default.srcpart
- Truncated Path -> Alias:
- /src [a]
- /srcpart/ds=2008-04-08/hr=11 [b]
- /srcpart/ds=2008-04-08/hr=12 [b]
- Needs Tagging: true
- Reduce Operator Tree:
- Join Operator
- condition map:
- Right Outer Join0 to 1
- keys:
- 0 key (type: string)
- 1 key (type: string)
- outputColumnNames: key, value, key0, value0
- Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
- outputColumnNames: key, value, key0, value0
- Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- isSamplingPred: false
- predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
- Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- GlobalTableId: 0
-#### A masked pattern was here ####
- NumFilesPerFileSink: 1
- Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- properties:
- columns key,value,key0,value0
- columns.types string:string:string:string
- escape.delim \
- hive.serialization.extend.additional.nesting.levels true
- serialization.escape.crlf true
- serialization.format 1
- serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- TotalFiles: 1
- GatherStats: false
- MultiFileSpray: false
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: FROM
- src a
- FULL OUTER JOIN
- srcpart b
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM
- src a
- FULL OUTER JOIN
- srcpart b
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-17 val_17 17 val_17
-17 val_17 17 val_17
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-18 val_18 18 val_18
-19 val_19 19 val_19
-19 val_19 19 val_19
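
The two EXPLAIN EXTENDED plans in the file above are the point of this test (outer-join predicate pushdown, the "ppr" in the file name). With b.ds = '2008-04-08' inside the ON clause, the full outer join must still preserve unmatched rows from both tables, so all four srcpart partitions remain in Truncated Path -> Alias and the predicate survives only as a join-time residual (filter predicates: 1 {(VALUE.ds = '2008-04-08')}). With the same predicate moved to the WHERE clause, it becomes null-rejecting on b: the planner downgrades the join to Right Outer Join and prunes the scan to the two ds=2008-04-08 partitions. A sketch of the null-rejection argument, on hypothetical tables t1/t2 (not part of this test):

SELECT t1.k, t2.k
  FROM t1 FULL OUTER JOIN t2 ON (t1.k = t2.k)
 WHERE t2.ds = '2008-04-08';
-- Any t1-preserved row has t2.ds = NULL, and NULL = '2008-04-08' is not true,
-- so the WHERE clause discards every row that FULL (as opposed to RIGHT)
-- OUTER JOIN would have added.  The join can therefore be rewritten as a
-- RIGHT OUTER JOIN, and t2.ds = '2008-04-08' pushed down to prune t2's partitions.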
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
new file mode 100644
index 0000000..200b8ee
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
@@ -0,0 +1,691 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM
+ src a
+ FULL OUTER JOIN
+ srcpart b
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM
+ src a
+ FULL OUTER JOIN
+ srcpart b
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: key, value
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ null sort order: a
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ value expressions: value (type: string)
+ auto parallelism: false
+ TableScan
+ alias: b
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Select Operator
+ expressions: key (type: string), value (type: string), ds (type: string)
+ outputColumnNames: key, value, ds
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ null sort order: a
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ value expressions: value (type: string), ds (type: string)
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: src
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src
+ name: default.src
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=11
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ hr 11
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=12
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ hr 12
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=11
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ hr 11
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=12
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-09
+ hr 12
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+ Truncated Path -> Alias:
+ /src [a]
+ /srcpart/ds=2008-04-08/hr=11 [b]
+ /srcpart/ds=2008-04-08/hr=12 [b]
+ /srcpart/ds=2008-04-09/hr=11 [b]
+ /srcpart/ds=2008-04-09/hr=12 [b]
+ Needs Tagging: true
+ Reduce Operator Tree:
+ Join Operator
+ condition map:
+ Outer Join 0 to 1
+ filter mappings:
+ 1 [0, 1]
+ filter predicates:
+ 0
+ 1 {(VALUE.ds = '2008-04-08')}
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ outputColumnNames: key, value, key0, value0
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
+ outputColumnNames: key, value, key0, value0
+ Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ isSamplingPred: false
+ predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key0) > 15.0) and (UDFToDouble(key0) < 25.0)) (type: boolean)
+ Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns key,value,key0,value0
+ columns.types string:string:string:string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.escape.crlf true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: FROM
+ src a
+ FULL OUTER JOIN
+ srcpart b
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM
+ src a
+ FULL OUTER JOIN
+ srcpart b
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+17 val_17 17 val_17
+17 val_17 17 val_17
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+19 val_19 19 val_19
+19 val_19 19 val_19
+PREHOOK: query: EXPLAIN EXTENDED
+ FROM
+ src a
+ FULL OUTER JOIN
+ srcpart b
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+ FROM
+ src a
+ FULL OUTER JOIN
+ srcpart b
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+ Stage: Stage-1
+ Map Reduce
+ Map Operator Tree:
+ TableScan
+ alias: a
+ Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+ Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: key, value
+ Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ null sort order: a
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+ tag: 0
+ value expressions: value (type: string)
+ auto parallelism: false
+ TableScan
+ alias: b
+ Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Filter Operator
+ isSamplingPred: false
+ predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+ Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: key, value
+ Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+ Reduce Output Operator
+ key expressions: key (type: string)
+ null sort order: a
+ sort order: +
+ Map-reduce partition columns: key (type: string)
+ Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+ tag: 1
+ value expressions: value (type: string)
+ auto parallelism: false
+ Path -> Alias:
+#### A masked pattern was here ####
+ Path -> Partition:
+#### A masked pattern was here ####
+ Partition
+ base file name: src
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.src
+ numFiles 1
+ numRows 500
+ rawDataSize 5312
+ serialization.ddl struct src { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.src
+ name: default.src
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=11
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ hr 11
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+#### A masked pattern was here ####
+ Partition
+ base file name: hr=12
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ ds 2008-04-08
+ hr 12
+ properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ numFiles 1
+ numRows 500
+ partition_columns ds/hr
+ partition_columns.types string:string
+ rawDataSize 5312
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5812
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns key,value
+ columns.comments 'default','default'
+ columns.types string:string
+#### A masked pattern was here ####
+ name default.srcpart
+ partition_columns ds/hr
+ partition_columns.types string:string
+ serialization.ddl struct srcpart { string key, string value}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.srcpart
+ name: default.srcpart
+ Truncated Path -> Alias:
+ /src [a]
+ /srcpart/ds=2008-04-08/hr=11 [b]
+ /srcpart/ds=2008-04-08/hr=12 [b]
+ Needs Tagging: true
+ Reduce Operator Tree:
+ Join Operator
+ condition map:
+ Right Outer Join0 to 1
+ keys:
+ 0 key (type: string)
+ 1 key (type: string)
+ outputColumnNames: key, value, key0, value0
+ Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
+ Select Operator
+ expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
+ outputColumnNames: key, value, key0, value0
+ Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
+ Filter Operator
+ isSamplingPred: false
+ predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+ Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+#### A masked pattern was here ####
+ NumFilesPerFileSink: 1
+ Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ properties:
+ columns key,value,key0,value0
+ columns.types string:string:string:string
+ escape.delim \
+ hive.serialization.extend.additional.nesting.levels true
+ serialization.escape.crlf true
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ TotalFiles: 1
+ GatherStats: false
+ MultiFileSpray: false
+
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Processor Tree:
+ ListSink
+
+PREHOOK: query: FROM
+ src a
+ FULL OUTER JOIN
+ srcpart b
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM
+ src a
+ FULL OUTER JOIN
+ srcpart b
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+17 val_17 17 val_17
+17 val_17 17 val_17
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+18 val_18 18 val_18
+19 val_19 19 val_19
+19 val_19 19 val_19
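
Apart from the two deleted -- JAVA_VERSION_SPECIFIC_OUTPUT marker lines (693 vs 691 lines in the hunk headers), the replacement file above appears to differ from the deleted java1.7 copy only in the serialization order of the COLUMN_STATS_ACCURATE table property -- exactly the map-ordering noise the per-JDK files were papering over:

-- Same table property, two key orders (both lines appear verbatim above):
-- java1.7 file: COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-- new .q.out:   COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}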
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/char_udf1.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/char_udf1.q.java1.7.out b/ql/src/test/results/clientpositive/char_udf1.q.java1.7.out
deleted file mode 100644
index ee1c2ae..0000000
--- a/ql/src/test/results/clientpositive/char_udf1.q.java1.7.out
+++ /dev/null
@@ -1,463 +0,0 @@
-PREHOOK: query: drop table char_udf_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table char_udf_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20))
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20))
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@char_udf_1
-PREHOOK: query: insert overwrite table char_udf_1
- select key, value, key, value from src where key = '238' limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: insert overwrite table char_udf_1
- select key, value, key, value from src where key = '238' limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@char_udf_1
-POSTHOOK: Lineage: char_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with char support
-select
- concat(c1, c2),
- concat(c3, c4),
- concat(c1, c2) = concat(c3, c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with char support
-select
- concat(c1, c2),
- concat(c3, c4),
- concat(c1, c2) = concat(c3, c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-238val_238 238val_238 true
-PREHOOK: query: select
- upper(c2),
- upper(c4),
- upper(c2) = upper(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- upper(c2),
- upper(c4),
- upper(c2) = upper(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-VAL_238 VAL_238 true
-PREHOOK: query: select
- lower(c2),
- lower(c4),
- lower(c2) = lower(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- lower(c2),
- lower(c4),
- lower(c2) = lower(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238 val_238 true
-PREHOOK: query: -- Scalar UDFs
-select
- ascii(c2),
- ascii(c4),
- ascii(c2) = ascii(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- Scalar UDFs
-select
- ascii(c2),
- ascii(c4),
- ascii(c2) = ascii(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-118 118 true
-PREHOOK: query: select
- concat_ws('|', c1, c2),
- concat_ws('|', c3, c4),
- concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- concat_ws('|', c1, c2),
- concat_ws('|', c3, c4),
- concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-238|val_238 238|val_238 true
-PREHOOK: query: select
- decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
- decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
- decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
- decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
- decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238 val_238 true
-PREHOOK: query: select
- instr(c2, '_'),
- instr(c4, '_'),
- instr(c2, '_') = instr(c4, '_')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- instr(c2, '_'),
- instr(c4, '_'),
- instr(c2, '_') = instr(c4, '_')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-4 4 true
-PREHOOK: query: select
- length(c2),
- length(c4),
- length(c2) = length(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- length(c2),
- length(c4),
- length(c2) = length(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-7 7 true
-PREHOOK: query: select
- locate('a', 'abcdabcd', 3),
- locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3),
- locate('a', 'abcdabcd', 3) = locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- locate('a', 'abcdabcd', 3),
- locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3),
- locate('a', 'abcdabcd', 3) = locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-5 5 true
-PREHOOK: query: select
- lpad(c2, 15, ' '),
- lpad(c4, 15, ' '),
- lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- lpad(c2, 15, ' '),
- lpad(c4, 15, ' '),
- lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
- val_238 val_238 true
-PREHOOK: query: select
- ltrim(c2),
- ltrim(c4),
- ltrim(c2) = ltrim(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- ltrim(c2),
- ltrim(c4),
- ltrim(c2) = ltrim(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238 val_238 true
-PREHOOK: query: -- In hive wiki page https://cwiki.apache.org/confluence/display/Hive/LanguageManual+UDF
--- we only allow A regexp B, not regexp (A,B).
-
-select
- c2 regexp 'val',
- c4 regexp 'val',
- (c2 regexp 'val') = (c4 regexp 'val')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- In hive wiki page https://cwiki.apache.org/confluence/display/Hive/LanguageManual+UDF
--- we only allow A regexp B, not regexp (A,B).
-
-select
- c2 regexp 'val',
- c4 regexp 'val',
- (c2 regexp 'val') = (c4 regexp 'val')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-true true true
-PREHOOK: query: select
- regexp_extract(c2, 'val_([0-9]+)', 1),
- regexp_extract(c4, 'val_([0-9]+)', 1),
- regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- regexp_extract(c2, 'val_([0-9]+)', 1),
- regexp_extract(c4, 'val_([0-9]+)', 1),
- regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-238 238 true
-PREHOOK: query: select
- regexp_replace(c2, 'val', 'replaced'),
- regexp_replace(c4, 'val', 'replaced'),
- regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- regexp_replace(c2, 'val', 'replaced'),
- regexp_replace(c4, 'val', 'replaced'),
- regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-replaced_238 replaced_238 true
-PREHOOK: query: select
- reverse(c2),
- reverse(c4),
- reverse(c2) = reverse(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- reverse(c2),
- reverse(c4),
- reverse(c2) = reverse(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-832_lav 832_lav true
-PREHOOK: query: select
- rpad(c2, 15, ' '),
- rpad(c4, 15, ' '),
- rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- rpad(c2, 15, ' '),
- rpad(c4, 15, ' '),
- rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238 val_238 true
-PREHOOK: query: select
- rtrim(c2),
- rtrim(c4),
- rtrim(c2) = rtrim(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- rtrim(c2),
- rtrim(c4),
- rtrim(c2) = rtrim(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238 val_238 true
-PREHOOK: query: select
- sentences('See spot run. See jane run.'),
- sentences(cast('See spot run. See jane run.' as char(50)))
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- sentences('See spot run. See jane run.'),
- sentences(cast('See spot run. See jane run.' as char(50)))
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-[["See","spot","run"],["See","jane","run"]] [["See","spot","run"],["See","jane","run"]]
-PREHOOK: query: select
- split(c2, '_'),
- split(c4, '_')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- split(c2, '_'),
- split(c4, '_')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-["val","238"] ["val","238"]
-PREHOOK: query: select
- str_to_map('a:1,b:2,c:3',',',':'),
- str_to_map(cast('a:1,b:2,c:3' as char(20)),',',':')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- str_to_map('a:1,b:2,c:3',',',':'),
- str_to_map(cast('a:1,b:2,c:3' as char(20)),',',':')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-{"b":"2","a":"1","c":"3"} {"b":"2","a":"1","c":"3"}
-PREHOOK: query: select
- substr(c2, 1, 3),
- substr(c4, 1, 3),
- substr(c2, 1, 3) = substr(c4, 1, 3)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- substr(c2, 1, 3),
- substr(c4, 1, 3),
- substr(c2, 1, 3) = substr(c4, 1, 3)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val val true
-PREHOOK: query: select
- trim(c2),
- trim(c4),
- trim(c2) = trim(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- trim(c2),
- trim(c4),
- trim(c2) = trim(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238 val_238 true
-PREHOOK: query: -- Aggregate Functions
-select
- compute_stats(c2, 16),
- compute_stats(c4, 16)
-from char_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- Aggregate Functions
-select
- compute_stats(c2, 16),
- compute_stats(c4, 16)
-from char_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"} {"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}
-PREHOOK: query: select
- min(c2),
- min(c4)
-from char_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- min(c2),
- min(c4)
-from char_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238 val_238
-PREHOOK: query: select
- max(c2),
- max(c4)
-from char_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
- max(c2),
- max(c4)
-from char_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238 val_238
-PREHOOK: query: drop table char_udf_1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@char_udf_1
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: drop table char_udf_1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@char_udf_1
-POSTHOOK: Output: default@char_udf_1
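
The char_udf1 golden file above checks that each UDF gives the same answer for a string column (c1/c2) as for its char-typed copy (c3/c4). The semantics being exercised: char(n) values are blank-padded to n in storage, but the padding is invisible to the UDFs, which is why length(c4) is 7 even though c4 is declared char(20). A minimal standalone illustration (hypothetical query, not part of the test):

select length(cast('val_238' as char(20))),
       reverse(cast('val_238' as char(20)));
-- Expected: 7 and 832_lav -- the value is padded to 20 characters on write,
-- but the trailing spaces are stripped when the char is handed to a UDF,
-- matching the length(c4) and reverse(c4) rows in the golden file above.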