You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by se...@apache.org on 2018/06/27 02:05:46 UTC
[13/13] hive git commit: HIVE-19532 : fix tests - update some out
files on master-txnstats branch (Sergey Shelukhin)
HIVE-19532 : fix tests - update some out files on master-txnstats branch (Sergey Shelukhin)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/798ff7d2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/798ff7d2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/798ff7d2
Branch: refs/heads/master-txnstats
Commit: 798ff7d2443f4477c9fb02ad871511b152217829
Parents: bdc256e
Author: sergey <se...@apache.org>
Authored: Tue Jun 26 19:04:49 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Tue Jun 26 19:04:49 2018 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 14 +-
.../apache/hadoop/hive/ql/metadata/Hive.java | 6 +-
.../hive/ql/optimizer/StatsOptimizer.java | 2 +-
.../clientpositive/autoColumnStats_4.q.out | 7 +-
.../llap/acid_bucket_pruning.q.out | 14 +-
.../llap/acid_vectorization_original.q.out | 14 +-
.../llap/dynpart_sort_optimization_acid.q.out | 114 +++++------
.../llap/enforce_constraint_notnull.q.out | 24 +--
.../llap/insert_into_default_keyword.q.out | 122 ++++++------
.../insert_values_orig_table_use_metadata.q.out | 196 ++-----------------
.../materialized_view_create_rewrite_3.q.out | 40 ++--
.../materialized_view_create_rewrite_4.q.out | 33 ++--
.../materialized_view_create_rewrite_5.q.out | 51 ++---
...ized_view_create_rewrite_rebuild_dummy.q.out | 40 ++--
.../results/clientpositive/llap/mm_all.q.out | 4 +-
.../results/clientpositive/llap/mm_exim.q.out | 1 +
.../llap/results_cache_invalidation.q.out | 130 ++++++------
.../llap/results_cache_transactional.q.out | 74 +++----
ql/src/test/results/clientpositive/mm_all.q.out | 4 +-
.../results/clientpositive/mm_default.q.out | 2 +-
.../test/results/clientpositive/row__id.q.out | 18 +-
.../results/clientpositive/stats_nonpart.q.out | 3 +-
.../results/clientpositive/stats_part.q.out | 9 +-
.../results/clientpositive/stats_part2.q.out | 46 +++--
.../results/clientpositive/stats_sizebug.q.out | 5 +-
.../tez/acid_vectorization_original_tez.q.out | 14 +-
.../clientpositive/tez/explainanalyze_5.q.out | 10 +-
27 files changed, 438 insertions(+), 559 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index e67b579..caf886f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1652,6 +1652,11 @@ public class AcidUtils {
}
}
+ // TODO# remove
+ public static TableSnapshot getTableSnapshot(
+ Configuration conf, Table tbl) throws LockException {
+ return getTableSnapshot(conf, tbl, false);
+ }
/**
* Create a TableSnapshot with the given "conf"
* for the table of the given "tbl".
@@ -1662,8 +1667,7 @@ public class AcidUtils {
* @throws LockException
*/
public static TableSnapshot getTableSnapshot(
- Configuration conf,
- Table tbl) throws LockException {
+ Configuration conf, Table tbl, boolean isInTxnScope) throws LockException {
if (!isTransactionalTable(tbl)) {
return null;
} else {
@@ -1679,9 +1683,9 @@ public class AcidUtils {
if (txnId > 0 && isTransactionalTable(tbl)) {
validWriteIdList = getTableValidWriteIdList(conf, fullTableName);
- // TODO: we shouldn't do this during normal Hive compilation, write IDs should be in conf.
- // Can this still happen for DDLTask-based queries?
- if (validWriteIdList == null && !HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
+ // TODO: remove in_test filters?
+ if (validWriteIdList == null && !isInTxnScope
+ && !HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
LOG.warn("Obtaining write IDs from metastore for " + tbl.getTableName());
validWriteIdList = getTableValidWriteIdListWithTxnList(
conf, tbl.getDbName(), tbl.getTableName());
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 9f052ae..c0a9be0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.metadata;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
@@ -768,7 +769,7 @@ public class Hive {
try {
AcidUtils.TableSnapshot tableSnapshot = null;
if (transactional) {
- tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable());
+ tableSnapshot = AcidUtils.getTableSnapshot(conf, newParts.get(0).getTable(), true);
}
// Remove the DDL time so that it gets refreshed
for (Partition tmpPart: newParts) {
@@ -2448,12 +2449,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
* @throws HiveException
* if table doesn't exist or partition already exists
*/
+ @VisibleForTesting
public Partition createPartition(Table tbl, Map<String, String> partSpec) throws HiveException {
try {
org.apache.hadoop.hive.metastore.api.Partition part =
Partition.createMetaPartitionObject(tbl, partSpec, null);
AcidUtils.TableSnapshot tableSnapshot =
- AcidUtils.getTableSnapshot(conf, tbl);
+ AcidUtils.getTableSnapshot(conf, tbl, false);
part.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
part.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
return new Partition(tbl, getMSC().add_partition(part));
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 18a27c4..31041af 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -919,7 +919,7 @@ public class StatsOptimizer extends Transform {
partNames.add(part.getName());
}
AcidUtils.TableSnapshot tableSnapshot =
- AcidUtils.getTableSnapshot(hive.getConf(), tbl);
+ AcidUtils.getTableSnapshot(hive.getConf(), tbl, true);
Map<String, List<ColumnStatisticsObj>> result = hive.getMSC().getPartitionColumnStatistics(
tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName),
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
index a16ec07..1906865 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
@@ -199,8 +199,11 @@ Retention: 0
#### A masked pattern was here ####
Table Type: MANAGED_TABLE
Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\"}}
bucketing_version 2
numFiles 2
+ numRows 10
+ rawDataSize 0
totalSize 1899
transactional true
transactional_properties default
@@ -241,9 +244,11 @@ Retention: 0
#### A masked pattern was here ####
Table Type: MANAGED_TABLE
Table Parameters:
- COLUMN_STATS_ACCURATE {}
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
bucketing_version 2
numFiles 4
+ numRows 8
+ rawDataSize 0
totalSize 3275
transactional true
transactional_properties default
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
index b856b99..cfb9f1b 100644
--- a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
@@ -45,22 +45,22 @@ STAGE PLANS:
alias: acidtbldefault
filterExpr: (a = 1) (type: boolean)
buckets included: [13,] of 16
- Statistics: Num rows: 1850 Data size: 7036 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 9174 Data size: 36696 Basic stats: COMPLETE Column stats: COMPLETE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: (a = 1) (type: boolean)
- Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: 1 (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
GlobalTableId: 0
directory: hdfs://### HDFS PATH ###
NumFilesPerFileSink: 1
- Statistics: Num rows: 5 Data size: 19 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Stats Publishing Key Prefix: hdfs://### HDFS PATH ###
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -88,6 +88,7 @@ STAGE PLANS:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true"}}
bucket_count 16
bucket_field_name a
bucketing_version 2
@@ -99,6 +100,8 @@ STAGE PLANS:
location hdfs://### HDFS PATH ###
name default.acidtbldefault
numFiles 17
+ numRows 9174
+ rawDataSize 0
serialization.ddl struct acidtbldefault { i32 a}
serialization.format 1
serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -111,6 +114,7 @@ STAGE PLANS:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
properties:
+ COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"a":"true"}}
bucket_count 16
bucket_field_name a
bucketing_version 2
@@ -122,6 +126,8 @@ STAGE PLANS:
location hdfs://### HDFS PATH ###
name default.acidtbldefault
numFiles 17
+ numRows 9174
+ rawDataSize 0
serialization.ddl struct acidtbldefault { i32 a}
serialization.format 1
serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
index be1b4c6..57ff575 100644
--- a/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_vectorization_original.q.out
@@ -665,22 +665,22 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: over10k_orc_bucketed
- Statistics: Num rows: 1237 Data size: 707670 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: ROW__ID
- Statistics: Num rows: 1237 Data size: 707670 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 2098 Data size: 622340 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
mode: hash
outputColumnNames: _col0, _col1
- Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
- Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: llap
LLAP IO: may be used (ACID table)
@@ -692,13 +692,13 @@ STAGE PLANS:
keys: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
mode: mergepartial
outputColumnNames: _col0, _col1
- Statistics: Num rows: 618 Data size: 51912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1049 Data size: 88116 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (_col1 > 1L) (type: boolean)
- Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 206 Data size: 17304 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 349 Data size: 29316 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
index 7a9e200..6c3751d 100644
--- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out
@@ -95,19 +95,19 @@ STAGE PLANS:
TableScan
alias: acid_part
filterExpr: ((key = 'foo') and (ds = '2008-04-08')) (type: boolean)
- Statistics: Num rows: 160 Data size: 61001 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1601 Data size: 139287 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL
Execution mode: llap
LLAP IO: may be used (ACID table)
Reducer 2
@@ -116,10 +116,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 1906 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1720 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -191,7 +191,7 @@ STAGE PLANS:
TableScan
alias: acid_part
filterExpr: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean)
- Statistics: Num rows: 159 Data size: 104317 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 1601 Data size: 433871 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: (key = 'foo') (type: boolean)
Statistics: Num rows: 5 Data size: 1355 Basic stats: COMPLETE Column stats: PARTIAL
@@ -383,19 +383,19 @@ STAGE PLANS:
TableScan
alias: acid_part_sdpo
filterExpr: ((key = 'foo') and (ds = '2008-04-08')) (type: boolean)
- Statistics: Num rows: 176 Data size: 67063 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1601 Data size: 150414 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
Execution mode: llap
LLAP IO: may be used (ACID table)
Reducer 2
@@ -404,10 +404,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 1905 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -479,7 +479,7 @@ STAGE PLANS:
TableScan
alias: acid_part_sdpo
filterExpr: ((key = 'foo') and (ds) IN ('2008-04-08')) (type: boolean)
- Statistics: Num rows: 171 Data size: 112152 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 1601 Data size: 444998 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: (key = 'foo') (type: boolean)
Statistics: Num rows: 5 Data size: 1355 Basic stats: COMPLETE Column stats: PARTIAL
@@ -680,19 +680,19 @@ STAGE PLANS:
TableScan
alias: acid_2l_part
filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr = 11)) (type: boolean)
- Statistics: Num rows: 157 Data size: 60527 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1601 Data size: 139287 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 435 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
Execution mode: llap
LLAP IO: may be used (ACID table)
Reducer 2
@@ -701,10 +701,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
- Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -777,19 +777,19 @@ STAGE PLANS:
TableScan
alias: acid_2l_part
filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr >= 11)) (type: boolean)
- Statistics: Num rows: 1600 Data size: 156727 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 3201 Data size: 291291 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 910 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), hr (type: int)
outputColumnNames: _col0, _col4
- Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL
value expressions: _col4 (type: int)
Execution mode: llap
LLAP IO: may be used (ACID table)
@@ -799,10 +799,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), VALUE._col2 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
- Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 10 Data size: 3480 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -904,19 +904,19 @@ STAGE PLANS:
TableScan
alias: acid_2l_part
filterExpr: (value = 'bar') (type: boolean)
- Statistics: Num rows: 1600 Data size: 451127 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 4200 Data size: 1171800 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: (value = 'bar') (type: boolean)
- Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 14 Data size: 3906 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), ds (type: string), hr (type: int)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL
value expressions: _col1 (type: string), _col2 (type: int)
Execution mode: llap
LLAP IO: may be used (ACID table)
@@ -926,10 +926,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), VALUE._col1 (type: int)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 14 Data size: 3696 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1103,19 +1103,19 @@ STAGE PLANS:
TableScan
alias: acid_2l_part_sdpo
filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr = 11)) (type: boolean)
- Statistics: Num rows: 157 Data size: 60527 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1601 Data size: 150414 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
Execution mode: llap
LLAP IO: may be used (ACID table)
Reducer 2
@@ -1124,10 +1124,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), 11 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3, _col4
- Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 1927 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 5 Data size: 469 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1200,19 +1200,19 @@ STAGE PLANS:
TableScan
alias: acid_2l_part_sdpo
filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr >= 11)) (type: boolean)
- Statistics: Num rows: 1600 Data size: 156727 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 3201 Data size: 313458 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 455 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), hr (type: int)
outputColumnNames: _col0, _col4
- Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
Reduce Output Operator
key expressions: '2008-04-08' (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: ++++
Map-reduce partition columns: '2008-04-08' (type: string), _col4 (type: int)
- Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
Execution mode: llap
LLAP IO: may be used (ACID table)
Reducer 2
@@ -1221,11 +1221,11 @@ STAGE PLANS:
Select Operator
expressions: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 'foo' (type: string), 'bar' (type: string), '2008-04-08' (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
- Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 1360 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
Dp Sort State: PARTITION_BUCKET_SORTED
- Statistics: Num rows: 5 Data size: 489 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 1360 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1327,19 +1327,19 @@ STAGE PLANS:
TableScan
alias: acid_2l_part_sdpo
filterExpr: (value = 'bar') (type: boolean)
- Statistics: Num rows: 1600 Data size: 451127 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 4952 Data size: 2061430 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: (value = 'bar') (type: boolean)
- Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 1375 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), ds (type: string), hr (type: int)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
Reduce Output Operator
key expressions: _col1 (type: string), _col2 (type: int), '_bucket_number' (type: string), _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: ++++
Map-reduce partition columns: _col1 (type: string), _col2 (type: int)
- Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 1320 Basic stats: COMPLETE Column stats: PARTIAL
Execution mode: llap
LLAP IO: may be used (ACID table)
Reducer 2
@@ -1348,11 +1348,11 @@ STAGE PLANS:
Select Operator
expressions: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), KEY._col1 (type: string), KEY._col2 (type: int), KEY.'_bucket_number' (type: string)
outputColumnNames: _col0, _col1, _col2, '_bucket_number'
- Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 1810 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
Dp Sort State: PARTITION_BUCKET_SORTED
- Statistics: Num rows: 5 Data size: 1409 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 1810 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1526,7 +1526,7 @@ STAGE PLANS:
TableScan
alias: acid_2l_part_sdpo_no_cp
filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr = 11)) (type: boolean)
- Statistics: Num rows: 97 Data size: 82922 Basic stats: COMPLETE Column stats: PARTIAL
+ Statistics: Num rows: 1601 Data size: 599036 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: (key = 'foo') (type: boolean)
Statistics: Num rows: 5 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL
@@ -1625,19 +1625,19 @@ STAGE PLANS:
TableScan
alias: acid_2l_part_sdpo_no_cp
filterExpr: ((key = 'foo') and (ds = '2008-04-08') and (hr >= 11)) (type: boolean)
- Statistics: Num rows: 1600 Data size: 598664 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 3201 Data size: 1197516 Basic stats: COMPLETE Column stats: PARTIAL
Filter Operator
predicate: (key = 'foo') (type: boolean)
- Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), key (type: string), ds (type: string), hr (type: int)
outputColumnNames: _col0, _col1, _col3, _col4
- Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 2675 Basic stats: COMPLETE Column stats: PARTIAL
Reduce Output Operator
key expressions: _col3 (type: string), _col4 (type: int), '_bucket_number' (type: string), _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: ++++
Map-reduce partition columns: _col3 (type: string), _col4 (type: int)
- Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 2675 Basic stats: COMPLETE Column stats: PARTIAL
value expressions: _col1 (type: string), 'bar' (type: string)
Execution mode: llap
LLAP IO: may be used (ACID table)
@@ -1647,11 +1647,11 @@ STAGE PLANS:
Select Operator
expressions: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY._col3 (type: string), KEY._col4 (type: int), KEY.'_bucket_number' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, '_bucket_number'
- Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 3165 Basic stats: COMPLETE Column stats: PARTIAL
File Output Operator
compressed: false
Dp Sort State: PARTITION_BUCKET_SORTED
- Statistics: Num rows: 5 Data size: 1870 Basic stats: PARTIAL Column stats: PARTIAL
+ Statistics: Num rows: 5 Data size: 3165 Basic stats: COMPLETE Column stats: PARTIAL
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
index 8a5a326..ecf79ae 100644
--- a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
+++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
@@ -3237,19 +3237,19 @@ STAGE PLANS:
TableScan
alias: acid_uami_n1
filterExpr: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean)
- Statistics: Num rows: 281 Data size: 87904 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1002 Data size: 225450 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (((de = 109.23) or (de = 119.23)) and enforce_constraint(vc is not null)) (type: boolean)
- Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 675 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), i (type: int), vc (type: varchar(128))
outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: int), _col3 (type: varchar(128))
Execution mode: vectorized, llap
LLAP IO: may be used (ACID table)
@@ -3259,10 +3259,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128))
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 1564 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -3331,19 +3331,19 @@ STAGE PLANS:
TableScan
alias: acid_uami_n1
filterExpr: ((de = 3.14) and enforce_constraint((i is not null and vc is not null))) (type: boolean)
- Statistics: Num rows: 320 Data size: 100040 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1002 Data size: 225450 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: ((de = 3.14) and enforce_constraint((i is not null and vc is not null))) (type: boolean)
- Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 225 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), i (type: int), vc (type: varchar(128))
outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: int), _col3 (type: varchar(128))
Execution mode: vectorized, llap
LLAP IO: may be used (ACID table)
@@ -3353,10 +3353,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: int), 3.14 (type: decimal(5,2)), VALUE._col1 (type: varchar(128))
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 2 Data size: 625 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
http://git-wip-us.apache.org/repos/asf/hive/blob/798ff7d2/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out b/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
index cd38c51..a93593f 100644
--- a/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
+++ b/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
@@ -1705,19 +1705,19 @@ STAGE PLANS:
TableScan
alias: insert_into1_n0
filterExpr: (value = 1) (type: boolean)
- Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (value = 1) (type: boolean)
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), value (type: string), i (type: int)
outputColumnNames: _col0, _col2, _col3
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col2 (type: string), _col3 (type: int)
Execution mode: vectorized, llap
LLAP IO: may be used (ACID table)
@@ -1727,10 +1727,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 1 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 169 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1813,19 +1813,19 @@ STAGE PLANS:
TableScan
alias: insert_into1_n0
filterExpr: (value = 1) (type: boolean)
- Statistics: Num rows: 25 Data size: 4700 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (value = 1) (type: boolean)
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), i (type: int)
outputColumnNames: _col0, _col3
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col3 (type: int)
Execution mode: vectorized, llap
LLAP IO: may be used (ACID table)
@@ -1835,10 +1835,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), 1 (type: int), null (type: string), VALUE._col0 (type: int)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2371,15 +2371,15 @@ STAGE PLANS:
TableScan
alias: t
filterExpr: enforce_constraint(key is not null) (type: boolean)
- Statistics: Num rows: 20 Data size: 80 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: enforce_constraint(key is not null) (type: boolean)
- Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: key (type: int)
sort order: +
Map-reduce partition columns: key (type: int)
- Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: may be used (ACID table)
Map 5
@@ -2408,18 +2408,18 @@ STAGE PLANS:
0 key (type: int)
1 key (type: int)
outputColumnNames: _col0, _col6
- Statistics: Num rows: 11 Data size: 44 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: _col0 is null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col6 (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
sort order:
Map-reduce partition columns: null (type: string)
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int)
Reducer 3
Execution mode: llap
@@ -2427,10 +2427,10 @@ STAGE PLANS:
Select Operator
expressions: VALUE._col0 (type: int), 'a1' (type: string), null (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2440,15 +2440,15 @@ STAGE PLANS:
Select Operator
expressions: _col0 (type: int), 'a1' (type: string), null (type: string)
outputColumnNames: key, a1, value
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: compute_stats(key, 'hll'), compute_stats(a1, 'hll'), compute_stats(value, 'hll')
mode: hash
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
Reducer 4
Execution mode: llap
@@ -2457,10 +2457,10 @@ STAGE PLANS:
aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2569,12 +2569,12 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: t
- Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: key (type: int)
sort order: +
Map-reduce partition columns: key (type: int)
- Statistics: Num rows: 20 Data size: 3760 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: value (type: string), ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
Execution mode: vectorized, llap
LLAP IO: may be used (ACID table)
@@ -2601,62 +2601,62 @@ STAGE PLANS:
0 key (type: int)
1 key (type: int)
outputColumnNames: _col0, _col2, _col5, _col6, _col7
- Statistics: Num rows: 22 Data size: 4136 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 2 Data size: 432 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: ((_col0 = _col6) and (_col6 < 3)) (type: boolean)
- Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col5 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col0
- Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: ((_col0 = _col6) and (_col6 > 3) and (_col6 >= 3) and enforce_constraint(_col0 is not null)) (type: boolean)
- Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col5 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), _col0 (type: int), _col2 (type: string)
outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: UDFToInteger(_col0) (type: int)
- Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: int), _col3 (type: string)
Filter Operator
predicate: (_col0 = _col6) (type: boolean)
- Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col5 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col5
- Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: _col5 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
mode: hash
outputColumnNames: _col0, _col1
- Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
sort order: +
Map-reduce partition columns: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
- Statistics: Num rows: 11 Data size: 2068 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Filter Operator
predicate: (_col0 is null and enforce_constraint(_col6 is not null)) (type: boolean)
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 261 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col6 (type: int), _col7 (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
sort order:
Map-reduce partition columns: null (type: string)
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: int), _col1 (type: string)
Reducer 3
Execution mode: vectorized, llap
@@ -2664,10 +2664,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
outputColumnNames: _col0
- Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2680,10 +2680,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: int), 'a1' (type: string), VALUE._col1 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2698,17 +2698,17 @@ STAGE PLANS:
keys: KEY._col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
mode: mergepartial
outputColumnNames: _col0, _col1
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (_col1 > 1L) (type: boolean)
- Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: cardinality_violation(_col0) (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2717,19 +2717,19 @@ STAGE PLANS:
Select Operator
expressions: _col0 (type: int)
outputColumnNames: val
- Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: compute_stats(val, 'hll')
mode: complete
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>)
outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 432 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2740,10 +2740,10 @@ STAGE PLANS:
Select Operator
expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), null (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -2753,15 +2753,15 @@ STAGE PLANS:
Select Operator
expressions: _col0 (type: int), _col1 (type: string), null (type: string)
outputColumnNames: key, a1, value
- Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 175 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: compute_stats(key, 'hll'), compute_stats(a1, 'hll'), compute_stats(value, 'hll')
mode: hash
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
sort order:
- Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
Reducer 7
Execution mode: llap
@@ -2770,10 +2770,10 @@ STAGE PLANS:
aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat