You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by xu...@apache.org on 2014/05/02 01:06:39 UTC
svn commit: r1591796 - in /hive/trunk: data/files/
ql/src/java/org/apache/hadoop/hive/ql/exec/
ql/src/test/queries/clientpositive/ ql/src/test/results/clientpositive/
ql/src/test/results/clientpositive/tez/
Author: xuefu
Date: Thu May 1 23:06:39 2014
New Revision: 1591796
URL: http://svn.apache.org/r1591796
Log:
HIVE-6984: Analyzing partitioned table with NULL values for the partition column failed with NPE (reviewed by Sergey)
Added:
hive/trunk/data/files/test1.txt
hive/trunk/ql/src/test/queries/clientpositive/analyze_table_null_partition.q
hive/trunk/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out
Modified:
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/count.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/mrr.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/tez_dml.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/tez_union.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/union2.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/union3.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/union5.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/union7.q.out
hive/trunk/ql/src/test/results/clientpositive/tez/union9.q.out
Added: hive/trunk/data/files/test1.txt
URL: http://svn.apache.org/viewvc/hive/trunk/data/files/test1.txt?rev=1591796&view=auto
==============================================================================
--- hive/trunk/data/files/test1.txt (added)
+++ hive/trunk/data/files/test1.txt Thu May 1 23:06:39 2014
@@ -0,0 +1,5 @@
+tom15
+john
+mayr40
+30
+
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java Thu May 1 23:06:39 2014
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
@@ -65,6 +66,8 @@ public class TableScanOperator extends O
private transient int rowLimit = -1;
private transient int currCount = 0;
+ private String defaultPartitionName;
+
public TableDesc getTableDesc() {
return tableDesc;
}
@@ -145,8 +148,9 @@ public class TableScanOperator extends O
(StructObjectInspector) inputObjInspectors[0], ObjectInspectorCopyOption.WRITABLE);
for (Object o : writable) {
- assert (o != null && o.toString().length() > 0);
- values.add(o.toString());
+ // It's possible that a partition column may have a NULL value, in which case the row belongs
+ // to the special partition, __HIVE_DEFAULT_PARTITION__.
+ values.add(o == null ? defaultPartitionName : o.toString());
}
partitionSpecs = FileUtils.makePartName(conf.getPartColumns(), values);
LOG.info("Stats Gathering found a new partition spec = " + partitionSpecs);
@@ -205,6 +209,7 @@ public class TableScanOperator extends O
jc = new JobConf(hconf);
}
+ defaultPartitionName = HiveConf.getVar(hconf, HiveConf.ConfVars.DEFAULTPARTITIONNAME);
currentStat = null;
stats = new HashMap<String, Stat>();
if (conf.getPartColumns() == null || conf.getPartColumns().size() == 0) {
Added: hive/trunk/ql/src/test/queries/clientpositive/analyze_table_null_partition.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/analyze_table_null_partition.q?rev=1591796&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/analyze_table_null_partition.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/analyze_table_null_partition.q Thu May 1 23:06:39 2014
@@ -0,0 +1,21 @@
+SET hive.exec.dynamic.partition.mode=nonstrict;
+
+DROP TABLE IF EXISTS test1;
+DROP TABLE IF EXISTS test2;
+
+CREATE TABLE test1(name string, age int);
+CREATE TABLE test2(name string) PARTITIONED by (age int);
+
+LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1;
+FROM test1 INSERT OVERWRITE TABLE test2 PARTITION(age) SELECT test1.name, test1.age;
+
+ANALYZE TABLE test2 PARTITION(age) COMPUTE STATISTICS;
+
+-- To show stats. It doesn't show due to a bug.
+DESC EXTENDED test2;
+
+-- Another way to show stats.
+EXPLAIN EXTENDED select * from test2;
+
+DROP TABLE test1;
+DROP TABLE test2;
Added: hive/trunk/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out?rev=1591796&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out Thu May 1 23:06:39 2014
@@ -0,0 +1,335 @@
+PREHOOK: query: DROP TABLE IF EXISTS test1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS test1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS test2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS test2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE test1(name string, age int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: CREATE TABLE test1(name string, age int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test1
+PREHOOK: query: CREATE TABLE test2(name string) PARTITIONED by (age int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: CREATE TABLE test2(name string) PARTITIONED by (age int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test2
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@test1
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/test1.txt' INTO TABLE test1
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@test1
+PREHOOK: query: FROM test1 INSERT OVERWRITE TABLE test2 PARTITION(age) SELECT test1.name, test1.age
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test1
+PREHOOK: Output: default@test2
+POSTHOOK: query: FROM test1 INSERT OVERWRITE TABLE test2 PARTITION(age) SELECT test1.name, test1.age
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test1
+POSTHOOK: Output: default@test2@age=15
+POSTHOOK: Output: default@test2@age=30
+POSTHOOK: Output: default@test2@age=40
+POSTHOOK: Output: default@test2@age=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: test2 PARTITION(age=15).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=30).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=40).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=__HIVE_DEFAULT_PARTITION__).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+PREHOOK: query: ANALYZE TABLE test2 PARTITION(age) COMPUTE STATISTICS
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test2
+PREHOOK: Input: default@test2@age=15
+PREHOOK: Input: default@test2@age=30
+PREHOOK: Input: default@test2@age=40
+PREHOOK: Input: default@test2@age=__HIVE_DEFAULT_PARTITION__
+PREHOOK: Output: default@test2
+PREHOOK: Output: default@test2@age=15
+PREHOOK: Output: default@test2@age=30
+PREHOOK: Output: default@test2@age=40
+PREHOOK: Output: default@test2@age=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: query: ANALYZE TABLE test2 PARTITION(age) COMPUTE STATISTICS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test2
+POSTHOOK: Input: default@test2@age=15
+POSTHOOK: Input: default@test2@age=30
+POSTHOOK: Input: default@test2@age=40
+POSTHOOK: Input: default@test2@age=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Output: default@test2
+POSTHOOK: Output: default@test2@age=15
+POSTHOOK: Output: default@test2@age=30
+POSTHOOK: Output: default@test2@age=40
+POSTHOOK: Output: default@test2@age=__HIVE_DEFAULT_PARTITION__
+POSTHOOK: Lineage: test2 PARTITION(age=15).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=30).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=40).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=__HIVE_DEFAULT_PARTITION__).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+PREHOOK: query: -- To show stats. It doesn't show due to a bug.
+DESC EXTENDED test2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test2
+POSTHOOK: query: -- To show stats. It doesn't show due to a bug.
+DESC EXTENDED test2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test2
+POSTHOOK: Lineage: test2 PARTITION(age=15).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=30).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=40).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=__HIVE_DEFAULT_PARTITION__).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+name string
+age int
+
+# Partition Information
+# col_name data_type comment
+
+age int
+
+#### A masked pattern was here ####
+PREHOOK: query: -- Another way to show stats.
+EXPLAIN EXTENDED select * from test2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Another way to show stats.
+EXPLAIN EXTENDED select * from test2
+POSTHOOK: type: QUERY
+POSTHOOK: Lineage: test2 PARTITION(age=15).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=30).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=40).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=__HIVE_DEFAULT_PARTITION__).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+ TOK_FROM
+ TOK_TABREF
+ TOK_TABNAME
+ test2
+ TOK_INSERT
+ TOK_DESTINATION
+ TOK_DIR
+ TOK_TMP_FILE
+ TOK_SELECT
+ TOK_SELEXPR
+ TOK_ALLCOLREF
+
+
+STAGE DEPENDENCIES:
+ Stage-0 is a root stage
+
+STAGE PLANS:
+ Stage: Stage-0
+ Fetch Operator
+ limit: -1
+ Partition Description:
+ Partition
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ age 15
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns name
+ columns.comments
+ columns.types string
+#### A masked pattern was here ####
+ name default.test2
+ numFiles 1
+ numRows 1
+ partition_columns age
+ partition_columns.types int
+ rawDataSize 3
+ serialization.ddl struct test2 { string name}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 4
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns name
+ columns.comments
+ columns.types string
+#### A masked pattern was here ####
+ name default.test2
+ partition_columns age
+ partition_columns.types int
+ serialization.ddl struct test2 { string name}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.test2
+ name: default.test2
+ Partition
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ age 30
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns name
+ columns.comments
+ columns.types string
+#### A masked pattern was here ####
+ name default.test2
+ numFiles 1
+ numRows 1
+ partition_columns age
+ partition_columns.types int
+ rawDataSize 0
+ serialization.ddl struct test2 { string name}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 1
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns name
+ columns.comments
+ columns.types string
+#### A masked pattern was here ####
+ name default.test2
+ partition_columns age
+ partition_columns.types int
+ serialization.ddl struct test2 { string name}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.test2
+ name: default.test2
+ Partition
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ age 40
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns name
+ columns.comments
+ columns.types string
+#### A masked pattern was here ####
+ name default.test2
+ numFiles 1
+ numRows 1
+ partition_columns age
+ partition_columns.types int
+ rawDataSize 4
+ serialization.ddl struct test2 { string name}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 5
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns name
+ columns.comments
+ columns.types string
+#### A masked pattern was here ####
+ name default.test2
+ partition_columns age
+ partition_columns.types int
+ serialization.ddl struct test2 { string name}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.test2
+ name: default.test2
+ Partition
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ partition values:
+ age __HIVE_DEFAULT_PARTITION__
+ properties:
+ COLUMN_STATS_ACCURATE true
+ bucket_count -1
+ columns name
+ columns.comments
+ columns.types string
+#### A masked pattern was here ####
+ name default.test2
+ numFiles 1
+ numRows 2
+ partition_columns age
+ partition_columns.types int
+ rawDataSize 4
+ serialization.ddl struct test2 { string name}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ totalSize 6
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ input format: org.apache.hadoop.mapred.TextInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+ properties:
+ bucket_count -1
+ columns name
+ columns.comments
+ columns.types string
+#### A masked pattern was here ####
+ name default.test2
+ partition_columns age
+ partition_columns.types int
+ serialization.ddl struct test2 { string name}
+ serialization.format 1
+ serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ name: default.test2
+ name: default.test2
+ Processor Tree:
+ TableScan
+ alias: test2
+ Statistics: Num rows: 5 Data size: 111 Basic stats: COMPLETE Column stats: NONE
+ GatherStats: false
+ Select Operator
+ expressions: name (type: string), age (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 5 Data size: 111 Basic stats: COMPLETE Column stats: NONE
+ ListSink
+
+PREHOOK: query: DROP TABLE test1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test1
+PREHOOK: Output: default@test1
+POSTHOOK: query: DROP TABLE test1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test1
+POSTHOOK: Output: default@test1
+POSTHOOK: Lineage: test2 PARTITION(age=15).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=30).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=40).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=__HIVE_DEFAULT_PARTITION__).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+PREHOOK: query: DROP TABLE test2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test2
+PREHOOK: Output: default@test2
+POSTHOOK: query: DROP TABLE test2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test2
+POSTHOOK: Output: default@test2
+POSTHOOK: Lineage: test2 PARTITION(age=15).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=30).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=40).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
+POSTHOOK: Lineage: test2 PARTITION(age=__HIVE_DEFAULT_PARTITION__).name SIMPLE [(test1)test1.FieldSchema(name:name, type:string, comment:null), ]
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out Thu May 1 23:06:39 2014
@@ -535,7 +535,7 @@ STAGE PLANS:
Stage: Stage-1
Tez
Edges:
- Map 3 <- Map 1 (CUSTOM_EDGE), Map 2 (CUSTOM_EDGE)
+ Map 3 <- Map 2 (CUSTOM_EDGE), Map 1 (CUSTOM_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out Thu May 1 23:06:39 2014
@@ -124,7 +124,7 @@ STAGE PLANS:
Stage: Stage-1
Tez
Edges:
- Map 3 <- Map 2 (CUSTOM_EDGE), Map 1 (BROADCAST_EDGE)
+ Map 3 <- Map 1 (BROADCAST_EDGE), Map 2 (CUSTOM_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/count.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/count.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/count.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/count.q.out Thu May 1 23:06:39 2014
@@ -63,7 +63,7 @@ STAGE PLANS:
sort order: +++
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
+ value expressions: _col5 (type: bigint)
Reducer 2
Reduce Operator Tree:
Group By Operator
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out Thu May 1 23:06:39 2014
@@ -481,7 +481,6 @@ STAGE PLANS:
Map-reduce partition columns: _col0 (type: tinyint)
Statistics: Num rows: 31436 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.3
- value expressions: _col2 (type: bigint)
Reducer 2
Reduce Operator Tree:
Group By Operator
@@ -577,7 +576,6 @@ STAGE PLANS:
Map-reduce partition columns: _col0 (type: tinyint)
Statistics: Num rows: 1849 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.3
- value expressions: _col3 (type: bigint), _col4 (type: bigint)
Reducer 2
Reduce Operator Tree:
Group By Operator
@@ -981,8 +979,8 @@ STAGE PLANS:
alias: src
Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
Select Operator
- expressions: value (type: string), key (type: string)
- outputColumnNames: value, key
+ expressions: key (type: string), value (type: string)
+ outputColumnNames: key, value
Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: value (type: string)
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out Thu May 1 23:06:39 2014
@@ -1,7 +1,9 @@
PREHOOK: query: show partitions srcpart
PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@srcpart
POSTHOOK: query: show partitions srcpart
POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@srcpart
ds=2008-04-08/hr=11
ds=2008-04-08/hr=12
ds=2008-04-09/hr=11
@@ -22,8 +24,10 @@ POSTHOOK: Output: database:default
POSTHOOK: Output: default@nzhang_part2
PREHOOK: query: describe extended nzhang_part1
PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@nzhang_part1
POSTHOOK: query: describe extended nzhang_part1
POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@nzhang_part1
key string default
value string default
ds string
@@ -187,8 +191,10 @@ POSTHOOK: Lineage: nzhang_part2 PARTITIO
POSTHOOK: Lineage: nzhang_part2 PARTITION(ds=2008-12-31,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: show partitions nzhang_part1
PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@nzhang_part1
POSTHOOK: query: show partitions nzhang_part1
POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@nzhang_part1
POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -201,8 +207,10 @@ ds=2008-04-08/hr=11
ds=2008-04-08/hr=12
PREHOOK: query: show partitions nzhang_part2
PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@nzhang_part2
POSTHOOK: query: show partitions nzhang_part2
POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@nzhang_part2
POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: nzhang_part1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/mrr.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/mrr.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/mrr.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/mrr.q.out Thu May 1 23:06:39 2014
@@ -452,7 +452,6 @@ STAGE PLANS:
sort order: ++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: bigint)
Reducer 3
Reduce Operator Tree:
Group By Operator
@@ -864,7 +863,6 @@ STAGE PLANS:
sort order: ++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: bigint)
Reducer 3
Reduce Operator Tree:
Group By Operator
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/tez_dml.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/tez_dml.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/tez_dml.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/tez_dml.q.out Thu May 1 23:06:39 2014
@@ -879,10 +879,10 @@ POSTHOOK: Lineage: tmp_src_part PARTITIO
STAGE DEPENDENCIES:
Stage-2 is a root stage
Stage-3 depends on stages: Stage-2
- Stage-1 depends on stages: Stage-3
- Stage-4 depends on stages: Stage-1
Stage-0 depends on stages: Stage-3
- Stage-5 depends on stages: Stage-0
+ Stage-4 depends on stages: Stage-0
+ Stage-1 depends on stages: Stage-3
+ Stage-5 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-2
@@ -928,7 +928,7 @@ STAGE PLANS:
Stage: Stage-3
Dependency Collection
- Stage: Stage-1
+ Stage: Stage-0
Move Operator
tables:
replace: false
@@ -936,12 +936,12 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.odd
+ name: default.even
Stage: Stage-4
Stats-Aggr Operator
- Stage: Stage-0
+ Stage: Stage-1
Move Operator
tables:
replace: false
@@ -949,7 +949,7 @@ STAGE PLANS:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.even
+ name: default.odd
Stage: Stage-5
Stats-Aggr Operator
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/tez_union.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/tez_union.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/tez_union.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/tez_union.q.out Thu May 1 23:06:39 2014
@@ -159,8 +159,8 @@ STAGE PLANS:
TableScan
alias: src
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
+ expressions: key (type: string)
+ outputColumnNames: _col0
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
@@ -170,8 +170,8 @@ STAGE PLANS:
TableScan
alias: src
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
+ expressions: key (type: string)
+ outputColumnNames: _col0
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
@@ -181,8 +181,8 @@ STAGE PLANS:
TableScan
alias: src
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
+ expressions: key (type: string)
+ outputColumnNames: _col0
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
@@ -192,8 +192,8 @@ STAGE PLANS:
TableScan
alias: src
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
+ expressions: key (type: string)
+ outputColumnNames: _col0
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
@@ -206,9 +206,9 @@ STAGE PLANS:
condition expressions:
0
1
- Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
Select Operator
- Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -317,8 +317,8 @@ STAGE PLANS:
TableScan
alias: src
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
+ expressions: key (type: string)
+ outputColumnNames: _col0
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -343,8 +343,8 @@ STAGE PLANS:
TableScan
alias: src
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
+ expressions: key (type: string)
+ outputColumnNames: _col0
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -446,7 +446,7 @@ STAGE PLANS:
Edges:
Map 2 <- Map 1 (BROADCAST_EDGE), Union 3 (CONTAINS), Map 5 (BROADCAST_EDGE), Map 8 (BROADCAST_EDGE)
Map 7 <- Map 1 (BROADCAST_EDGE), Map 6 (BROADCAST_EDGE), Union 3 (CONTAINS), Map 8 (BROADCAST_EDGE)
- Map 9 <- Map 1 (BROADCAST_EDGE), Map 8 (BROADCAST_EDGE), Union 3 (CONTAINS), Map 10 (BROADCAST_EDGE)
+ Map 9 <- Map 8 (BROADCAST_EDGE), Map 1 (BROADCAST_EDGE), Union 3 (CONTAINS), Map 10 (BROADCAST_EDGE)
Reducer 4 <- Union 3 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
@@ -1074,8 +1074,8 @@ STAGE PLANS:
TableScan
alias: src
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
+ expressions: key (type: string)
+ outputColumnNames: _col0
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
@@ -1096,14 +1096,14 @@ STAGE PLANS:
0 _col0 (type: string)
1 key (type: string)
outputColumnNames: _col0, _col2
- Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col0 (type: string), _col2 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1113,8 +1113,8 @@ STAGE PLANS:
TableScan
alias: src
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
+ expressions: key (type: string)
+ outputColumnNames: _col0
Reduce Output Operator
key expressions: _col0 (type: string)
sort order: +
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/union2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/union2.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/union2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/union2.q.out Thu May 1 23:06:39 2014
@@ -28,8 +28,6 @@ STAGE PLANS:
TableScan
alias: s1
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
Select Operator
Group By Operator
aggregations: count(1)
@@ -43,8 +41,6 @@ STAGE PLANS:
TableScan
alias: s2
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
Select Operator
Group By Operator
aggregations: count(1)
@@ -59,14 +55,14 @@ STAGE PLANS:
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: bigint)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/union3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/union3.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/union3.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/union3.q.out Thu May 1 23:06:39 2014
@@ -54,82 +54,70 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Limit
Number of rows: 1
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Map 4
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Limit
Number of rows: 1
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Map 6
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Limit
Number of rows: 1
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Map 9
Map Operator Tree:
TableScan
alias: src
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Limit
Number of rows: 1
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string), _col1 (type: string)
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Reducer 10
Reduce Operator Tree:
Extract
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Limit
Number of rows: 1
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Select Operator
expressions: 2 (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
value expressions: _col0 (type: int)
Reducer 11
Reduce Operator Tree:
@@ -180,19 +168,19 @@ STAGE PLANS:
Reducer 7
Reduce Operator Tree:
Extract
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Limit
Number of rows: 1
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Select Operator
expressions: 1 (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
value expressions: _col0 (type: int)
Reducer 8
Reduce Operator Tree:
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/union5.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/union5.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/union5.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/union5.q.out Thu May 1 23:06:39 2014
@@ -64,8 +64,8 @@ STAGE PLANS:
mode: mergepartial
outputColumnNames: _col0
Select Operator
- expressions: 'tst1' (type: string), _col0 (type: bigint)
- outputColumnNames: _col0, _col1
+ expressions: 'tst1' (type: string)
+ outputColumnNames: _col0
Select Operator
expressions: _col0 (type: string)
outputColumnNames: _col0
@@ -105,8 +105,8 @@ STAGE PLANS:
mode: mergepartial
outputColumnNames: _col0
Select Operator
- expressions: 'tst2' (type: string), _col0 (type: bigint)
- outputColumnNames: _col0, _col1
+ expressions: 'tst2' (type: string)
+ outputColumnNames: _col0
Select Operator
expressions: _col0 (type: string)
outputColumnNames: _col0
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/union7.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/union7.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/union7.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/union7.q.out Thu May 1 23:06:39 2014
@@ -46,8 +46,8 @@ STAGE PLANS:
TableScan
alias: s2
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
+ expressions: key (type: string)
+ outputColumnNames: _col0
Select Operator
expressions: _col0 (type: string)
outputColumnNames: _col0
@@ -68,8 +68,8 @@ STAGE PLANS:
mode: mergepartial
outputColumnNames: _col0
Select Operator
- expressions: 'tst1' (type: string), UDFToString(_col0) (type: string)
- outputColumnNames: _col0, _col1
+ expressions: 'tst1' (type: string)
+ outputColumnNames: _col0
Select Operator
expressions: _col0 (type: string)
outputColumnNames: _col0
Modified: hive/trunk/ql/src/test/results/clientpositive/tez/union9.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/tez/union9.q.out?rev=1591796&r1=1591795&r2=1591796&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/tez/union9.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/tez/union9.q.out Thu May 1 23:06:39 2014
@@ -31,8 +31,6 @@ STAGE PLANS:
TableScan
alias: s1
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
Select Operator
Group By Operator
aggregations: count(1)
@@ -46,8 +44,6 @@ STAGE PLANS:
TableScan
alias: s2
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
Select Operator
Group By Operator
aggregations: count(1)
@@ -61,8 +57,6 @@ STAGE PLANS:
TableScan
alias: s3
Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
Select Operator
Group By Operator
aggregations: count(1)
@@ -77,14 +71,14 @@ STAGE PLANS:
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: bigint)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat